Commit a4ffc0a0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (44 commits)
  dm raid1: report fault status
  dm raid1: handle read failures
  dm raid1: fix EIO after log failure
  dm raid1: handle recovery failures
  dm raid1: handle write failures
  dm snapshot: combine consecutive exceptions in memory
  dm: stripe enhanced status return
  dm: stripe trigger event on failure
  dm log: auto load modules
  dm: move deferred bio flushing to workqueue
  dm crypt: use async crypto
  dm crypt: prepare async callback fn
  dm crypt: add completion for async
  dm crypt: add async request mempool
  dm crypt: extract scatterlist processing
  dm crypt: tidy io ref counting
  dm crypt: introduce crypt_write_io_loop
  dm crypt: abstract crypt_write_done
  dm crypt: store sector mapping in dm_crypt_io
  dm crypt: move queue functions
  ...
parents d7511ec8 af195ac8
@@ -204,7 +204,7 @@ config BLK_DEV_DM

 config DM_DEBUG
 	boolean "Device mapper debugging support"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	depends on BLK_DEV_DM
 	---help---
 	  Enable this for messages that may help debug device-mapper problems.

@@ -212,7 +212,7 @@ config DM_DEBUG

 config DM_CRYPT
 	tristate "Crypt target support"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	depends on BLK_DEV_DM
 	select CRYPTO
 	select CRYPTO_CBC
 	---help---

@@ -230,34 +230,34 @@ config DM_CRYPT
 	  If unsure, say N.

 config DM_SNAPSHOT
-	tristate "Snapshot target (EXPERIMENTAL)"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	tristate "Snapshot target"
+	depends on BLK_DEV_DM
 	---help---
 	  Allow volume managers to take writable snapshots of a device.

 config DM_MIRROR
-	tristate "Mirror target (EXPERIMENTAL)"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	tristate "Mirror target"
+	depends on BLK_DEV_DM
 	---help---
 	  Allow volume managers to mirror logical volumes, also
 	  needed for live data migration tools such as 'pvmove'.

 config DM_ZERO
-	tristate "Zero target (EXPERIMENTAL)"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	tristate "Zero target"
+	depends on BLK_DEV_DM
 	---help---
 	  A target that discards writes, and returns all zeroes for
 	  reads. Useful in some recovery situations.

 config DM_MULTIPATH
-	tristate "Multipath target (EXPERIMENTAL)"
-	depends on BLK_DEV_DM && EXPERIMENTAL
+	tristate "Multipath target"
+	depends on BLK_DEV_DM
 	---help---
 	  Allow volume managers to support multipath hardware.

 config DM_MULTIPATH_EMC
-	tristate "EMC CX/AX multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	tristate "EMC CX/AX multipath support"
+	depends on DM_MULTIPATH && BLK_DEV_DM
 	---help---
 	  Multipath support for EMC CX/AX series hardware.
...
(collapsed diff not shown)
@@ -449,7 +449,7 @@ static void persistent_destroy(struct exception_store *store)

 static int persistent_read_metadata(struct exception_store *store)
 {
-	int r, new_snapshot;
+	int r, uninitialized_var(new_snapshot);
 	struct pstore *ps = get_info(store);

 	/*
...
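
Note: uninitialized_var(), used here and in a few hunks below, is the kernel's annotation for silencing a false "may be used uninitialized" gcc warning without changing the generated code. At the time it was provided by the gcc-4 compiler header roughly as the following self-assignment (shown only for context, paraphrased from memory):

    /* roughly: convince gcc the variable is initialized, compiles to nothing */
    #define uninitialized_var(x) x = x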
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
+#include <linux/compat.h>

 #include <asm/uaccess.h>

@@ -702,7 +703,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
 	int r;
 	char *new_name = (char *) param + param->data_start;

-	if (new_name < (char *) param->data ||
+	if (new_name < param->data ||
 	    invalid_str(new_name, (void *) param + param_size)) {
 		DMWARN("Invalid new logical volume name supplied.");
 		return -EINVAL;

@@ -728,7 +729,7 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
 	if (!md)
 		return -ENXIO;

-	if (geostr < (char *) param->data ||
+	if (geostr < param->data ||
 	    invalid_str(geostr, (void *) param + param_size)) {
 		DMWARN("Invalid geometry supplied.");
 		goto out;

@@ -1350,10 +1351,10 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
 {
 	struct dm_ioctl tmp, *dmi;

-	if (copy_from_user(&tmp, user, sizeof(tmp)))
+	if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
 		return -EFAULT;

-	if (tmp.data_size < sizeof(tmp))
+	if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
 		return -EINVAL;

 	dmi = vmalloc(tmp.data_size);

@@ -1397,13 +1398,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
 	return 0;
 }

-static int ctl_ioctl(struct inode *inode, struct file *file,
-		     uint command, ulong u)
+static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
 {
 	int r = 0;
 	unsigned int cmd;
-	struct dm_ioctl *param;
-	struct dm_ioctl __user *user = (struct dm_ioctl __user *) u;
+	struct dm_ioctl *uninitialized_var(param);
 	ioctl_fn fn = NULL;
 	size_t param_size;

@@ -1471,8 +1470,23 @@ static int ctl_ioctl(struct inode *inode, struct file *file,
 	return r;
 }

+static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
+{
+	return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
+}
+
+#ifdef CONFIG_COMPAT
+static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
+{
+	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
+}
+#else
+#define dm_compat_ctl_ioctl NULL
+#endif
+
 static const struct file_operations _ctl_fops = {
-	.ioctl = ctl_ioctl,
+	.unlocked_ioctl = dm_ctl_ioctl,
+	.compat_ioctl = dm_compat_ctl_ioctl,
 	.owner = THIS_MODULE,
 };
...
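
Note: with the switch from .ioctl to .unlocked_ioctl/.compat_ioctl, 32-bit and 64-bit callers reach ctl_ioctl() through the same entry points (compat_ptr() converting the 32-bit pointer), which is what lets the per-command compat registrations be deleted from fs/compat_ioctl.c further down. A minimal userspace sketch of exercising this path with DM_VERSION against the usual control node, illustrative only and assuming /dev/mapper/control exists and the caller has the needed privileges:

    /* sketch: query the device-mapper ioctl interface version */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dm-ioctl.h>

    int main(void)
    {
            struct dm_ioctl dmi;
            int fd = open("/dev/mapper/control", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&dmi, 0, sizeof(dmi));
            dmi.version[0] = DM_VERSION_MAJOR;      /* caller's expected major */
            dmi.data_size = sizeof(dmi);            /* header only, no payload */

            if (ioctl(fd, DM_VERSION, &dmi) == 0)   /* kernel writes its version back */
                    printf("dm ioctl %u.%u.%u\n",
                           dmi.version[0], dmi.version[1], dmi.version[2]);

            close(fd);
            return 0;
    }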
@@ -41,7 +41,7 @@ int dm_unregister_dirty_log_type(struct dirty_log_type *type)
 	return 0;
 }

-static struct dirty_log_type *get_type(const char *type_name)
+static struct dirty_log_type *_get_type(const char *type_name)
 {
 	struct dirty_log_type *type;

@@ -61,6 +61,55 @@ static struct dirty_log_type *get_type(const char *type_name)
 	return NULL;
 }

+/*
+ * get_type
+ * @type_name
+ *
+ * Attempt to retrieve the dirty_log_type by name. If not already
+ * available, attempt to load the appropriate module.
+ *
+ * Log modules are named "dm-log-" followed by the 'type_name'.
+ * Modules may contain multiple types.
+ * This function will first try the module "dm-log-<type_name>",
+ * then truncate 'type_name' on the last '-' and try again.
+ *
+ * For example, if type_name was "clustered-disk", it would search
+ * 'dm-log-clustered-disk' then 'dm-log-clustered'.
+ *
+ * Returns: dirty_log_type* on success, NULL on failure
+ */
+static struct dirty_log_type *get_type(const char *type_name)
+{
+	char *p, *type_name_dup;
+	struct dirty_log_type *type;
+
+	type = _get_type(type_name);
+	if (type)
+		return type;
+
+	type_name_dup = kstrdup(type_name, GFP_KERNEL);
+	if (!type_name_dup) {
+		DMWARN("No memory left to attempt log module load for \"%s\"",
+		       type_name);
+		return NULL;
+	}
+
+	while (request_module("dm-log-%s", type_name_dup) ||
+	       !(type = _get_type(type_name))) {
+		p = strrchr(type_name_dup, '-');
+		if (!p)
+			break;
+		p[0] = '\0';
+	}
+
+	if (!type)
+		DMWARN("Module for logging type \"%s\" not found.", type_name);
+
+	kfree(type_name_dup);
+
+	return type;
+}
+
 static void put_type(struct dirty_log_type *type)
 {
 	spin_lock(&_lock);
...
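
Note: the module-name truncation in the new get_type() can be illustrated in isolation. This standalone userspace sketch (names invented for illustration, not kernel code) prints the full fallback order the loop above would try when no module ever registers the requested type:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mimics get_type()'s loop: try the full name, then keep chopping
     * the duplicate at its last '-' until nothing is left to chop. */
    static void print_probe_order(const char *type_name)
    {
            char *dup = strdup(type_name);
            char *p;

            if (!dup)
                    return;

            for (;;) {
                    printf("request_module(\"dm-log-%s\")\n", dup);
                    p = strrchr(dup, '-');
                    if (!p)
                            break;
                    *p = '\0';
            }
            free(dup);
    }

    int main(void)
    {
            print_probe_order("clustered-disk");
            /* prints:
             *   request_module("dm-log-clustered-disk")
             *   request_module("dm-log-clustered")
             */
            return 0;
    }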
@@ -106,7 +106,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);

 static struct kmem_cache *_mpio_cache;

-struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
...
(collapsed diff not shown)
@@ -213,11 +213,15 @@ static void unregister_snapshot(struct dm_snapshot *s)

 /*
  * Implementation of the exception hash tables.
+ * The lowest hash_shift bits of the chunk number are ignored, allowing
+ * some consecutive chunks to be grouped together.
  */
-static int init_exception_table(struct exception_table *et, uint32_t size)
+static int init_exception_table(struct exception_table *et, uint32_t size,
+				unsigned hash_shift)
 {
 	unsigned int i;

+	et->hash_shift = hash_shift;
 	et->hash_mask = size - 1;
 	et->table = dm_vcalloc(size, sizeof(struct list_head));
 	if (!et->table)

@@ -248,7 +252,7 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *

 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
 {
-	return chunk & et->hash_mask;
+	return (chunk >> et->hash_shift) & et->hash_mask;
 }

 static void insert_exception(struct exception_table *eh,

@@ -275,7 +279,8 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
 	slot = &et->table[exception_hash(et, chunk)];
 	list_for_each_entry (e, slot, hash_list)
-		if (e->old_chunk == chunk)
+		if (chunk >= e->old_chunk &&
+		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
 			return e;

 	return NULL;

@@ -307,6 +312,49 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
 	mempool_free(pe, pending_pool);
 }

+static void insert_completed_exception(struct dm_snapshot *s,
+				       struct dm_snap_exception *new_e)
+{
+	struct exception_table *eh = &s->complete;
+	struct list_head *l;
+	struct dm_snap_exception *e = NULL;
+
+	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
+
+	/* Add immediately if this table doesn't support consecutive chunks */
+	if (!eh->hash_shift)
+		goto out;
+
+	/* List is ordered by old_chunk */
+	list_for_each_entry_reverse(e, l, hash_list) {
+		/* Insert after an existing chunk? */
+		if (new_e->old_chunk == (e->old_chunk +
+					 dm_consecutive_chunk_count(e) + 1) &&
+		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
+					 dm_consecutive_chunk_count(e) + 1)) {
+			dm_consecutive_chunk_count_inc(e);
+			free_exception(new_e);
+			return;
+		}
+
+		/* Insert before an existing chunk? */
+		if (new_e->old_chunk == (e->old_chunk - 1) &&
+		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
+			dm_consecutive_chunk_count_inc(e);
+			e->old_chunk--;
+			e->new_chunk--;
+			free_exception(new_e);
+			return;
+		}
+
+		if (new_e->old_chunk > e->old_chunk)
+			break;
+	}
+
+out:
+	list_add(&new_e->hash_list, e ? &e->hash_list : l);
+}
+
 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 {
 	struct dm_snap_exception *e;

@@ -316,8 +364,12 @@ int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
 		return -ENOMEM;

 	e->old_chunk = old;
+
+	/* Consecutive_count is implicitly initialised to zero */
 	e->new_chunk = new;
-	insert_exception(&s->complete, e);
+
+	insert_completed_exception(s, e);

 	return 0;
 }

@@ -333,16 +385,6 @@ static int calc_max_buckets(void)
 	return mem;
 }

-/*
- * Rounds a number down to a power of 2.
- */
-static uint32_t round_down(uint32_t n)
-{
-	while (n & (n - 1))
-		n &= (n - 1);
-	return n;
-}
-
 /*
  * Allocate room for a suitable hash table.
  */

@@ -361,9 +403,9 @@ static int init_hash_tables(struct dm_snapshot *s)
 	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
 	hash_size = min(hash_size, max_buckets);

-	/* Round it down to a power of 2 */
-	hash_size = round_down(hash_size);
-	if (init_exception_table(&s->complete, hash_size))
+	hash_size = rounddown_pow_of_two(hash_size);
+	if (init_exception_table(&s->complete, hash_size,
+				 DM_CHUNK_CONSECUTIVE_BITS))
 		return -ENOMEM;

 	/*

@@ -374,7 +416,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 	if (hash_size < 64)
 		hash_size = 64;

-	if (init_exception_table(&s->pending, hash_size)) {
+	if (init_exception_table(&s->pending, hash_size, 0)) {
 		exit_exception_table(&s->complete, exception_cache);
 		return -ENOMEM;
 	}

@@ -733,7 +775,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	 * Add a proper exception, and remove the
 	 * in-flight exception from the list.
 	 */
-	insert_exception(&s->complete, e);
+	insert_completed_exception(s, e);

 out:
 	remove_exception(&pe->e);

@@ -867,11 +909,12 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
 }

 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
-			    struct bio *bio)
+			    struct bio *bio, chunk_t chunk)
 {
 	bio->bi_bdev = s->cow->bdev;
-	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
-		(bio->bi_sector & s->chunk_mask);
+	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
+			 (chunk - e->old_chunk)) +
+			 (bio->bi_sector & s->chunk_mask);
 }

 static int snapshot_map(struct dm_target *ti, struct bio *bio,

@@ -902,7 +945,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	/* If the block is already remapped - use that, else remap it */
 	e = lookup_exception(&s->complete, chunk);
 	if (e) {
-		remap_exception(s, e, bio);
+		remap_exception(s, e, bio, chunk);
 		goto out_unlock;
 	}

@@ -919,7 +962,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			goto out_unlock;
 		}

-		remap_exception(s, &pe->e, bio);
+		remap_exception(s, &pe->e, bio, chunk);
 		bio_list_add(&pe->snapshot_bios, bio);

 		r = DM_MAPIO_SUBMITTED;

@@ -1207,7 +1250,7 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,

 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,

@@ -1218,7 +1261,7 @@ static struct target_type origin_target = {

 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
...
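
Note: to make the consecutive-chunk arithmetic concrete, here is a small self-contained sketch using plain integers and invented example values (no kernel types); it mirrors how a merged exception is matched by lookup_exception() and remapped by remap_exception():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Invented example: one exception record that has absorbed three
             * neighbours, i.e. old chunks 100..103 map to new chunks 40..43. */
            uint64_t old_chunk = 100, new_chunk = 40;
            unsigned consecutive = 3;       /* dm_consecutive_chunk_count(e) */

            uint64_t chunk = 102;           /* chunk a bio falls into */

            /* lookup_exception(): one record now matches a whole range */
            assert(chunk >= old_chunk && chunk <= old_chunk + consecutive);

            /* remap_exception(): the offset within the run is preserved */
            uint64_t remapped = new_chunk + (chunk - old_chunk);

            printf("old chunk %llu -> new chunk %llu\n",
                   (unsigned long long)chunk, (unsigned long long)remapped);
            /* prints: old chunk 102 -> new chunk 42 */
            return 0;
    }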
@@ -16,19 +16,22 @@

 struct exception_table {
 	uint32_t hash_mask;
+	unsigned hash_shift;
 	struct list_head *table;
 };

 /*
  * The snapshot code deals with largish chunks of the disk at a
- * time. Typically 64k - 256k.
+ * time. Typically 32k - 512k.
  */

-/* FIXME: can we get away with limiting these to a uint32_t ? */
 typedef sector_t chunk_t;

 /*
  * An exception is used where an old chunk of data has been
  * replaced by a new one.
+ * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
+ * of chunks that follow contiguously. Remaining bits hold the number of the
+ * chunk within the device.
  */
 struct dm_snap_exception {
 	struct list_head hash_list;

@@ -37,6 +40,49 @@ struct dm_snap_exception {
 	chunk_t new_chunk;
 };

+/*
+ * Funtions to manipulate consecutive chunks
+ */
+#  if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
+#    define DM_CHUNK_CONSECUTIVE_BITS 8
+#    define DM_CHUNK_NUMBER_BITS 56
+
+static inline chunk_t dm_chunk_number(chunk_t chunk)
+{
+	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
+}
+
+static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+{
+	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
+}
+
+static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+{
+	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
+
+	BUG_ON(!dm_consecutive_chunk_count(e));
+}
+
+#  else
+#    define DM_CHUNK_CONSECUTIVE_BITS 0
+
+static inline chunk_t dm_chunk_number(chunk_t chunk)
+{
+	return chunk;
+}
+
+static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+{
+	return 0;
+}
+
+static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+{
+}
+
+#  endif
+
 /*
  * Abstraction to handle the meta/layout of exception stores (the
  * COW device).
...
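
Note: the 56/8-bit split of new_chunk defined above can be demonstrated outside the kernel. The following is only an illustrative sketch of the same packing (invented values, not the header itself):

    #include <stdint.h>
    #include <stdio.h>

    #define NUMBER_BITS 56  /* mirrors DM_CHUNK_NUMBER_BITS on 64-bit builds */

    static uint64_t chunk_number(uint64_t packed)
    {
            return packed & ((1ULL << NUMBER_BITS) - 1ULL);
    }

    static unsigned consecutive_count(uint64_t packed)
    {
            return (unsigned)(packed >> NUMBER_BITS);
    }

    int main(void)
    {
            uint64_t packed = 40;            /* new chunk 40, count 0 */

            packed += 1ULL << NUMBER_BITS;   /* ..._count_inc(): 0 -> 1 */
            packed += 1ULL << NUMBER_BITS;   /* 1 -> 2 */

            printf("chunk %llu, %u consecutive chunks follow\n",
                   (unsigned long long)chunk_number(packed),
                   consecutive_count(packed));
            /* prints: chunk 40, 2 consecutive chunks follow */
            return 0;
    }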
@@ -14,10 +14,13 @@
 #include <linux/log2.h>

 #define DM_MSG_PREFIX "striped"
+#define DM_IO_ERROR_THRESHOLD 15

 struct stripe {
 	struct dm_dev *dev;
 	sector_t physical_start;
+
+	atomic_t error_count;
 };

 struct stripe_c {

@@ -30,9 +33,29 @@ struct stripe_c {
 	uint32_t chunk_shift;
 	sector_t chunk_mask;

+	/* Needed for handling events */
+	struct dm_target *ti;
+
+	/* Work struct used for triggering events*/
+	struct work_struct kstriped_ws;
+
 	struct stripe stripe[0];
 };

+static struct workqueue_struct *kstriped;
+
+/*
+ * An event is triggered whenever a drive
+ * drops out of a stripe volume.
+ */
+static void trigger_event(struct work_struct *work)
+{
+	struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);
+
+	dm_table_event(sc->ti->table);
+}
+
 static inline struct stripe_c *alloc_context(unsigned int stripes)
 {
 	size_t len;

@@ -63,6 +86,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
 		return -ENXIO;

 	sc->stripe[stripe].physical_start = start;
+
 	return 0;
 }

@@ -135,6 +159,11 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -ENOMEM;
 	}

+	INIT_WORK(&sc->kstriped_ws, trigger_event);
+
+	/* Set pointer to dm target; used in trigger_event */
+	sc->ti = ti;
+
 	sc->stripes = stripes;
 	sc->stripe_width = width;
 	ti->split_io = chunk_size;

@@ -158,9 +187,11 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			kfree(sc);
 			return r;
 		}
+		atomic_set(&(sc->stripe[i].error_count), 0);
 	}

 	ti->private = sc;
+
 	return 0;
 }

@@ -172,6 +203,7 @@ static void stripe_dtr(struct dm_target *ti)
 	for (i = 0; i < sc->stripes; i++)
 		dm_put_device(ti, sc->stripe[i].dev);

+	flush_workqueue(kstriped);
 	kfree(sc);
 }

@@ -190,16 +222,37 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
 	return DM_MAPIO_REMAPPED;
 }

+/*
+ * Stripe status:
+ *
+ * INFO
+ * #stripes [stripe_name <stripe_name>] [group word count]
+ * [error count 'A|D' <error count 'A|D'>]
+ *
+ * TABLE
+ * #stripes [stripe chunk size]
+ * [stripe_name physical_start <stripe_name physical_start>]
+ *
+ */
+
 static int stripe_status(struct dm_target *ti,
 			 status_type_t type, char *result, unsigned int maxlen)
 {
 	struct stripe_c *sc = (struct stripe_c *) ti->private;
+	char buffer[sc->stripes + 1];
 	unsigned int sz = 0;
 	unsigned int i;

 	switch (type) {
 	case STATUSTYPE_INFO:
-		result[0] = '\0';
+		DMEMIT("%d ", sc->stripes);
+		for (i = 0; i < sc->stripes; i++)  {
+			DMEMIT("%s ", sc->stripe[i].dev->name);
+			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
+				'D' : 'A';
+		}
+		buffer[i] = '\0';
+		DMEMIT("1 %s", buffer);
+
 		break;

 	case STATUSTYPE_TABLE:

@@ -213,13 +266,52 @@ static int stripe_status(struct dm_target *ti,
 	return 0;
 }

+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+			 int error, union map_info *map_context)
+{
+	unsigned i;
+	char major_minor[16];
+	struct stripe_c *sc = ti->private;
+
+	if (!error)
+		return 0; /* I/O complete */
+
+	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+		return error;
+
+	if (error == -EOPNOTSUPP)
+		return error;
+
+	memset(major_minor, 0, sizeof(major_minor));
+	sprintf(major_minor, "%d:%d",
+		bio->bi_bdev->bd_disk->major,
+		bio->bi_bdev->bd_disk->first_minor);
+
+	/*
+	 * Test to see which stripe drive triggered the event
+	 * and increment error count for all stripes on that device.
+	 * If the error count for a given device exceeds the threshold
+	 * value we will no longer trigger any further events.
+	 */
+	for (i = 0; i < sc->stripes; i++)
+		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+			atomic_inc(&(sc->stripe[i].error_count));
+			if (atomic_read(&(sc->stripe[i].error_count)) <
+			    DM_IO_ERROR_THRESHOLD)
+				queue_work(kstriped, &sc->kstriped_ws);
+		}
+
+	return error;
+}
+
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version= {1, 0, 2},
+	.version = {1, 1, 0},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
 	.map    = stripe_map,
+	.end_io = stripe_end_io,
 	.status = stripe_status,
 };

@@ -231,6 +323,13 @@ int __init dm_stripe_init(void)
 	if (r < 0)
 		DMWARN("target registration failed");

+	kstriped = create_singlethread_workqueue("kstriped");
+	if (!kstriped) {
+		DMERR("failed to create workqueue kstriped");
+		dm_unregister_target(&stripe_target);
+		return -ENOMEM;
+	}
+
 	return r;
 }

@@ -239,5 +338,7 @@ void dm_stripe_exit(void)
 	if (dm_unregister_target(&stripe_target))
 		DMWARN("target unregistration failed");

+	destroy_workqueue(kstriped);
+
 	return;
 }
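
Note: with the enhanced status return, the striped target's INFO line now reports the stripe count, the member devices (typically their major:minor numbers), and one health character per member after a literal group count of "1": 'A' if no errors have been logged on that member, 'D' once errors have been seen. As an illustration only, a healthy two-member volume might report a status suffix along the lines of "2 8:16 8:32 1 AA", changing to "... 1 AD" after the second member starts failing I/O, at which point stripe_end_io() also queues trigger_event() so userspace monitors are notified.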
@@ -287,9 +287,8 @@ static void free_devices(struct list_head *devices)
 {
 	struct list_head *tmp, *next;

-	for (tmp = devices->next; tmp != devices; tmp = next) {
+	list_for_each_safe(tmp, next, devices) {
 		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
-		next = tmp->next;
 		kfree(dd);
 	}
 }

@@ -476,7 +475,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 			      int mode, struct dm_dev **result)
 {
 	int r;
-	dev_t dev;
+	dev_t uninitialized_var(dev);
 	struct dm_dev *dd;
 	unsigned int major, minor;

@@ -805,7 +804,7 @@ static int setup_indexes(struct dm_table *t)
 		return -ENOMEM;

 	/* set up internal nodes, bottom-up */
-	for (i = t->depth - 2, total = 0; i >= 0; i--) {
+	for (i = t->depth - 2; i >= 0; i--) {
 		t->index[i] = indexes;
 		indexes += (KEYS_PER_NODE * t->counts[i]);
 		setup_btree_index(i, t);

@@ -993,12 +992,11 @@ int dm_table_resume_targets(struct dm_table *t)

 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-	struct list_head *d, *devices;
+	struct dm_dev *dd;
+	struct list_head *devices = dm_table_get_devices(t);
 	int r = 0;

-	devices = dm_table_get_devices(t);
-	for (d = devices->next; d != devices; d = d->next) {
-		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
+	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}

@@ -1008,10 +1006,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)

 void dm_table_unplug_all(struct dm_table *t)
 {
-	struct list_head *d, *devices = dm_table_get_devices(t);
+	struct dm_dev *dd;
+	struct list_head *devices = dm_table_get_devices(t);

-	for (d = devices->next; d != devices; d = d->next) {
-		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
+	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->bdev);

 		blk_unplug(q);
...
(collapsed diff not shown)
@@ -78,7 +78,6 @@
 #include <linux/mii.h>
 #include <linux/if_bonding.h>
 #include <linux/watchdog.h>
-#include <linux/dm-ioctl.h>
 #include <linux/soundcard.h>
 #include <linux/lp.h>

@@ -1993,39 +1992,6 @@ COMPATIBLE_IOCTL(STOP_ARRAY_RO)
 COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
 COMPATIBLE_IOCTL(GET_BITMAP_FILE)
 ULONG_IOCTL(SET_BITMAP_FILE)
-/* DM */
-COMPATIBLE_IOCTL(DM_VERSION_32)
-COMPATIBLE_IOCTL(DM_REMOVE_ALL_32)
-COMPATIBLE_IOCTL(DM_LIST_DEVICES_32)
-COMPATIBLE_IOCTL(DM_DEV_CREATE_32)
-COMPATIBLE_IOCTL(DM_DEV_REMOVE_32)
-COMPATIBLE_IOCTL(DM_DEV_RENAME_32)
-COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32)
-COMPATIBLE_IOCTL(DM_DEV_STATUS_32)
-COMPATIBLE_IOCTL(DM_DEV_WAIT_32)
-COMPATIBLE_IOCTL(DM_TABLE_LOAD_32)
-COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32)
-COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
-COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
-COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
-COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
-COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
-COMPATIBLE_IOCTL(DM_VERSION)
-COMPATIBLE_IOCTL(DM_REMOVE_ALL)
-COMPATIBLE_IOCTL(DM_LIST_DEVICES)
-COMPATIBLE_IOCTL(DM_DEV_CREATE)
-COMPATIBLE_IOCTL(DM_DEV_REMOVE)
-COMPATIBLE_IOCTL(DM_DEV_RENAME)
-COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
-COMPATIBLE_IOCTL(DM_DEV_STATUS)
-COMPATIBLE_IOCTL(DM_DEV_WAIT)
-COMPATIBLE_IOCTL(DM_TABLE_LOAD)
-COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
-COMPATIBLE_IOCTL(DM_TABLE_DEPS)
-COMPATIBLE_IOCTL(DM_TABLE_STATUS)
-COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
-COMPATIBLE_IOCTL(DM_TARGET_MSG)
-COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
 /* Big K */
 COMPATIBLE_IOCTL(PIO_FONT)
 COMPATIBLE_IOCTL(GIO_FONT)
...
@@ -110,15 +110,15 @@ struct target_type {
 };

 struct io_restrictions {
-	unsigned int max_sectors;
-	unsigned short max_phys_segments;
-	unsigned short max_hw_segments;
-	unsigned short hardsect_size;
-	unsigned int max_segment_size;
-	unsigned int max_hw_sectors;
-	unsigned long seg_boundary_mask;
-	unsigned long bounce_pfn;
+	unsigned long bounce_pfn;
+	unsigned long seg_boundary_mask;
+	unsigned max_hw_sectors;
+	unsigned max_sectors;
+	unsigned max_segment_size;
+	unsigned short hardsect_size;
+	unsigned short max_hw_segments;
+	unsigned short max_phys_segments;
 	unsigned char no_cluster; /* inverted so that 0 is default */
 };

 struct dm_target {
...
@@ -232,36 +232,6 @@ enum {
 	DM_DEV_SET_GEOMETRY_CMD
 };

-/*
- * The dm_ioctl struct passed into the ioctl is just the header
- * on a larger chunk of memory. On x86-64 and other
- * architectures the dm-ioctl struct will be padded to an 8 byte
- * boundary so the size will be different, which would change the
- * ioctl code - yes I really messed up. This hack forces these
- * architectures to have the correct ioctl code.
- */
-#ifdef CONFIG_COMPAT
-typedef char ioctl_struct[308];
-#define DM_VERSION_32 _IOWR(DM_IOCTL, DM_VERSION_CMD, ioctl_struct)
-#define DM_REMOVE_ALL_32 _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, ioctl_struct)
-#define DM_LIST_DEVICES_32 _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, ioctl_struct)
-#define DM_DEV_CREATE_32 _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, ioctl_struct)
-#define DM_DEV_REMOVE_32 _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, ioctl_struct)
-#define DM_DEV_RENAME_32 _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, ioctl_struct)
-#define DM_DEV_SUSPEND_32 _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, ioctl_struct)
-#define DM_DEV_STATUS_32 _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, ioctl_struct)
-#define DM_DEV_WAIT_32 _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, ioctl_struct)
-#define DM_TABLE_LOAD_32 _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, ioctl_struct)
-#define DM_TABLE_CLEAR_32 _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, ioctl_struct)
-#define DM_TABLE_DEPS_32 _IOWR(DM_IOCTL, DM_TABLE_DEPS_CMD, ioctl_struct)
-#define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct)
-#define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct)
-#define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct)
-#define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct)
-#endif
-
 #define DM_IOCTL 0xfd

 #define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl)

@@ -286,9 +256,9 @@ typedef char ioctl_struct[308];
 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

 #define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 12
+#define DM_VERSION_MINOR 13
 #define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2007-10-02)"
+#define DM_VERSION_EXTRA "-ioctl (2007-10-18)"

 /* Status bits */
 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
...
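
Note: the comment removed above explains why the _32 ioctl numbers existed at all: _IOWR() folds the size of the argument type into the command number, so a struct dm_ioctl that is padded differently on a 64-bit kernel yields a different number than the one a 32-bit binary computes. A rough illustration of that encoding follows; it assumes the common asm-generic bit layout (some architectures differ), and the sizes used are made up for the example, not the real struct sizes:

    #include <stdio.h>

    /* Simplified asm-generic ioctl encoding: dir | size | type | nr. */
    #define IOC(dir, type, nr, size) \
            (((unsigned)(dir) << 30) | ((unsigned)(size) << 16) | \
             ((unsigned)(type) << 8) | (unsigned)(nr))
    #define IOC_READWRITE 3U

    int main(void)
    {
            /* hypothetical unpadded vs. padded argument sizes */
            unsigned a = IOC(IOC_READWRITE, 0xfd, 0, 304);
            unsigned b = IOC(IOC_READWRITE, 0xfd, 0, 312);

            /* different sizes produce different command numbers */
            printf("0x%08x vs 0x%08x\n", a, b);
            return 0;
    }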