Commit a8ac51e4 authored by Mike Snitzer

dm rq: add DM_MAPIO_DELAY_REQUEUE to delay requeue of blk-mq requests

Otherwise blk-mq will immediately dispatch requests that are requeued
via a BLK_MQ_RQ_QUEUE_BUSY return from blk_mq_ops' .queue_rq.

Delayed requeue is implemented using blk_mq_delay_kick_requeue_list()
with a delay of 5 secs.  In the context of DM multipath (all paths down)
it doesn't make any sense to requeue more quickly.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 9f4c3f87
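
For illustration, a minimal sketch of how a request-based target could opt into the delayed requeue. The target, its example_ctx state, and example_setup_clone() are hypothetical and not part of this commit; only the DM_MAPIO_* return codes and the .clone_and_map_rq hook come from the device-mapper API.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/* Hypothetical per-target state (illustration only). */
struct example_ctx {
	bool path_usable;
};

/* Hypothetical helper that allocates and prepares *clone (illustration only). */
static int example_setup_clone(struct example_ctx *ctx, struct request *rq,
			       struct request **clone);

static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				    union map_info *map_context,
				    struct request **clone)
{
	struct example_ctx *ctx = ti->private;

	if (!ctx->path_usable)
		/*
		 * On blk-mq the DM core now requeues the original request via
		 * blk_mq_delay_kick_requeue_list(q, 5000), i.e. roughly five
		 * seconds later, instead of redispatching it immediately.
		 */
		return DM_MAPIO_DELAY_REQUEUE;

	if (example_setup_clone(ctx, rq, clone))
		return DM_MAPIO_REQUEUE;	/* requeue right away */

	return DM_MAPIO_REMAPPED;		/* dispatch the prepared clone */
}
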
drivers/md/dm-rq.c
@@ -336,20 +336,21 @@ static void dm_old_requeue_request(struct request *rq)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_mq_requeue_request(struct request *rq)
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
 	blk_mq_requeue_request(rq);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!blk_queue_stopped(q))
-		blk_mq_kick_requeue_list(q);
+		blk_mq_delay_kick_requeue_list(q, msecs);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq)
+					struct request *rq, bool delay_requeue)
 {
 	int rw = rq_data_dir(rq);
 
@@ -359,7 +360,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
 	if (!rq->q->mq_ops)
 		dm_old_requeue_request(rq);
 	else
-		dm_mq_requeue_request(rq);
+		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
 
 	rq_completed(md, rw, false);
 }
@@ -389,7 +390,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 		return;
 	else if (r == DM_ENDIO_REQUEUE)
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(tio->md, tio->orig);
+		dm_requeue_original_request(tio->md, tio->orig, false);
 	else {
 		DMWARN("unimplemented target endio return value: %d", r);
 		BUG();
@@ -629,8 +630,8 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 
 /*
  * Returns:
- * 0                : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_*       : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
 static int map_request(struct dm_rq_target_io *tio, struct request *rq,
@@ -643,6 +644,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 	if (tio->clone) {
 		clone = tio->clone;
 		r = ti->type->map_rq(ti, clone, &tio->info);
+		if (r == DM_MAPIO_DELAY_REQUEUE)
+			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
 	} else {
 		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
 		if (r < 0) {
@@ -650,9 +653,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 			dm_kill_unmapped_request(rq, r);
 			return r;
 		}
-		if (r != DM_MAPIO_REMAPPED)
-			return r;
-		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+		if (r == DM_MAPIO_REMAPPED &&
+		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 			/* -ENOMEM */
 			ti->type->release_clone_rq(clone);
 			return DM_MAPIO_REQUEUE;
@@ -671,7 +673,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_REQUEUE:
 		/* The target wants to requeue the I/O */
-		dm_requeue_original_request(md, tio->orig);
+		break;
+	case DM_MAPIO_DELAY_REQUEUE:
+		/* The target wants to requeue the I/O after a delay */
+		dm_requeue_original_request(md, tio->orig, true);
 		break;
 	default:
 		if (r > 0) {
@@ -681,10 +686,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, r);
-		return r;
 	}
 
-	return 0;
+	return r;
 }
 
 static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -727,7 +731,7 @@ static void map_tio_request(struct kthread_work *work)
 	struct mapped_device *md = tio->md;
 
 	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq);
+		dm_requeue_original_request(md, rq, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
 #define DM_MAPIO_SUBMITTED	0
 #define DM_MAPIO_REMAPPED	1
 #define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE	3
 
 #define dm_sector_div64(x, y)( \
 { \