Commit 3e9f1be1 authored by Hannes Reinecke, committed by Mike Snitzer

dm mpath: remove process_queued_ios()

process_queued_ios() has served 3 functions:
  1) select pg and pgpath if none is selected
  2) start pg_init if requested
  3) dispatch queued IOs when pg is ready

Basically, a call to queue_work(process_queued_ios) can be replaced by
dm_table_run_md_queue_async(), which runs the request queue and ends up
calling map_io(), which does 1), 2) and 3).
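
In code, the substitution is (condensed from the hunks below; the async
queue run ends up invoking the request_fn, which calls the target's map
function):

	/* before: wake the per-multipath worker to re-drive queued I/O */
	queue_work(kmultipathd, &m->process_queued_ios);

	/* after: ask the dm core to re-run the mapped device's request
	 * queue; request_fn then calls map_io(), which does 1)-3) itself */
	dm_table_run_md_queue_async(m->ti->table);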

The exception is when !pg_ready() (which means pg_init is either running
or requested); in that case multipath_busy() prevents map_io() from being
called from the request_fn.
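
To make the gate concrete, here is a minimal standalone model (plain C,
not the kernel source; the reduced field names and the exact pg_ready()
predicate are assumptions based on the description above):

	#include <stdbool.h>

	/* reduced multipath state: queue_io is held while pg_init runs,
	 * pg_init_required marks a requested-but-not-started pg_init */
	struct mpath_state {
		bool queue_io;
		bool pg_init_required;
	};

	/* the pg is usable only when pg_init is neither running nor
	 * requested */
	static bool pg_ready(const struct mpath_state *m)
	{
		return !m->queue_io && !m->pg_init_required;
	}

	/* multipath_busy() analogue: while !pg_ready(), report busy so
	 * the request_fn backs off instead of calling map_io(); forward
	 * progress then depends on pg_init_done(), see below */
	static bool multipath_busy(const struct mpath_state *m)
	{
		return !pg_ready(m);
	}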

If pg_init is running, that should be fine as long as pg_init_done() does
the right thing when pg_init completes, i.e. restarts pg_init if
!pg_ready(), or calls dm_table_run_md_queue_async() to kick map_io().
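
Condensed from the pg_init_done() hunk further down, that completion
logic looks like this:

	if (m->pg_init_required) {
		/* pg_init was requested again meanwhile: restart it and
		 * stay !pg_ready() while it runs */
		m->pg_init_delay_retry = delay_retry;
		if (__pg_init_all_paths(m))
			goto out;
	}
	/* no pg_init pending: the pg is ready, stop holding back I/O */
	m->queue_io = 0;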

If pg_init is requested, we have to make sure the request is detected
and pg_init will be started.  pg_init is requested in 3 places:
  a) __choose_pgpath() in map_io()
  b) __choose_pgpath() in multipath_ioctl()
  c) pg_init retry in pg_init_done()
a) is ok because map_io() calls __pg_init_all_paths(), which does 2).
b) and c) each need an explicit call to __pg_init_all_paths(), which
does 2).

So this patch removes process_queued_ios() and ensures that
__pg_init_all_paths() is called at the appropriate locations.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
parent e8099177
drivers/md/dm-mpath.c

@@ -93,8 +93,6 @@ struct multipath {
 	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
-	struct work_struct process_queued_ios;
-
 	struct work_struct trigger_event;
 
 	/*
@@ -119,7 +117,6 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 static struct kmem_cache *_mpio_cache;
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
-static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
 static int __pgpath_busy(struct pgpath *pgpath);
@@ -197,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
 		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
 		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
@@ -254,16 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info)
  * Path selection
  *-----------------------------------------------*/
 
-static void __pg_init_all_paths(struct multipath *m)
+static int __pg_init_all_paths(struct multipath *m)
 {
 	struct pgpath *pgpath;
 	unsigned long pg_init_delay = 0;
 
 	if (m->pg_init_in_progress || m->pg_init_disabled)
-		return;
+		return 0;
 
 	m->pg_init_count++;
 	m->pg_init_required = 0;
+
+	/* Check here to reset pg_init_required */
+	if (!m->current_pg)
+		return 0;
+
 	if (m->pg_init_delay_retry)
 		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
@@ -275,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m)
 				       pg_init_delay))
 			m->pg_init_in_progress++;
 	}
+	return m->pg_init_in_progress;
 }
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
@@ -436,40 +438,13 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
 	if (!m->queue_if_no_path)
-		queue_work(kmultipathd, &m->process_queued_ios);
+		dm_table_run_md_queue_async(m->ti->table);
 
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	return 0;
 }
 
-static void process_queued_ios(struct work_struct *work)
-{
-	struct multipath *m =
-		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL;
-	unsigned must_queue = 1;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-
-	if (!m->current_pgpath)
-		__choose_pgpath(m, 0);
-
-	pgpath = m->current_pgpath;
-
-	if ((pgpath && !m->queue_io) ||
-	    (!pgpath && !m->queue_if_no_path))
-		must_queue = 0;
-
-	if (pgpath && m->pg_init_required)
-		__pg_init_all_paths(m);
-
-	spin_unlock_irqrestore(&m->lock, flags);
-	if (!must_queue)
-		dm_table_run_md_queue_async(m->ti->table);
-}
-
 /*
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
@@ -1016,7 +991,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		queue_work(kmultipathd, &m->process_queued_ios);
+		dm_table_run_md_queue_async(m->ti->table);
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -1212,11 +1187,12 @@ static void pg_init_done(void *data, int errors)
 		/* Activations of other paths are still on going */
 		goto out;
 
-	if (!m->pg_init_required)
-		m->queue_io = 0;
-
-	m->pg_init_delay_retry = delay_retry;
-	queue_work(kmultipathd, &m->process_queued_ios);
+	if (m->pg_init_required) {
+		m->pg_init_delay_retry = delay_retry;
+		if (__pg_init_all_paths(m))
+			goto out;
+	}
+	m->queue_io = 0;
 
 	/*
 	 * Wake up any thread waiting to suspend.
@@ -1592,8 +1568,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 			r = err;
 	}
 
-	if (r == -ENOTCONN && !fatal_signal_pending(current))
-		queue_work(kmultipathd, &m->process_queued_ios);
+	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+		spin_lock_irqsave(&m->lock, flags);
+		if (!m->current_pg) {
+			/* Path status changed, redo selection */
+			__choose_pgpath(m, 0);
+		}
+		if (m->pg_init_required)
+			__pg_init_all_paths(m);
+		spin_unlock_irqrestore(&m->lock, flags);
+		dm_table_run_md_queue_async(m->ti->table);
+	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }