Commit 0f1e5b5d authored by Linus Torvalds

Merge tag 'dm-4.1-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device-mapper fixes from Mike Snitzer:
 "Quite a few fixes for DM's blk-mq support thanks to extra DM multipath
  testing from Junichi Nomura and Bart Van Assche.

  Also fix a casting bug in dm_merge_bvec() that could cause only a
  single page to be added to a bio (Joe identified this while testing
  dm-cache writeback)"

* tag 'dm-4.1-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix casting bug in dm_merge_bvec()
  dm: fix reload failure of 0 path multipath mapping on blk-mq devices
  dm: fix false warning in free_rq_clone() for unmapped requests
  dm: requeue from blk-mq dm_mq_queue_rq() using BLK_MQ_RQ_QUEUE_BUSY
  dm mpath: fix leak of dm_mpath_io structure in blk-mq .queue_rq error path
  dm: fix NULL pointer when clone_and_map_rq returns !DM_MAPIO_REMAPPED
  dm: run queue on re-queue
parents c2102f3d 1c220c69
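
A note on the dm_merge_bvec() casting bug mentioned above: the merge path computes a byte budget from a sector count, and the old code stored that product in a plain int. Once the limit exceeded INT_MAX bytes the value wrapped negative, was clamped to 0, and bios grew one page at a time. A minimal user-space sketch of the failure mode (types and numbers illustrative, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    typedef uint64_t sector_t;                  /* as on 64-bit kernel builds */
    #define SECTOR_SHIFT 9

    int main(void)
    {
        sector_t max_sectors = 8 * 1024 * 1024; /* 4 GiB worth of sectors */
        unsigned int bi_size = 4096;            /* bytes already in the bio */

        /* Old code: 64-bit byte count truncated into an int (wraps to -4096 here). */
        int max_size = (int)((max_sectors << SECTOR_SHIFT) - bi_size);
        if (max_size < 0)                       /* "shouldn't ever happen" -- it did */
            max_size = 0;

        /* Fixed code: stay in sector_t and clamp to INT_MAX at the boundary. */
        sector_t fixed = (max_sectors << SECTOR_SHIFT) - bi_size;
        if (fixed > INT_MAX)
            fixed = INT_MAX;

        printf("old: %d bytes, fixed: %llu bytes\n",
               max_size, (unsigned long long)fixed);
        return 0;
    }
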
drivers/md/dm-mpath.c
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		/* blk-mq request-based interface */
 		*__clone = blk_get_request(bdev_get_queue(bdev),
					   rq_data_dir(rq), GFP_ATOMIC);
-		if (IS_ERR(*__clone))
+		if (IS_ERR(*__clone)) {
 			/* ENOMEM, requeue */
+			clear_mapinfo(m, map_context);
 			return r;
+		}
 		(*__clone)->bio = (*__clone)->biotail = NULL;
 		(*__clone)->rq_disk = bdev->bd_disk;
 		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
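
Context for the clear_mapinfo() addition: the shortlog's "leak of dm_mpath_io structure" came from this early return. map_context already carried a per-request dm_mpath_io allocated earlier in the map path, and bailing out without releasing it leaked one object per failed blk_get_request(). A self-contained sketch of the pattern, with illustrative names standing in for the mpath internals:

    #include <stdlib.h>

    struct io_ctx { int flags; };           /* stand-in for struct dm_mpath_io */
    struct map_info { void *ptr; };         /* stand-in for union map_info */

    /* Once the per-request context is attached, every early return must
     * release it again -- the clear_mapinfo() analogue below -- or each
     * requeued request leaks one allocation. */
    static int map_with_cleanup(struct map_info *info, int clone_alloc_fails)
    {
        struct io_ctx *ctx = malloc(sizeof(*ctx));
        if (!ctx)
            return -1;
        info->ptr = ctx;

        if (clone_alloc_fails) {            /* e.g. blk_get_request() failed */
            free(info->ptr);                /* what clear_mapinfo() does for mpath */
            info->ptr = NULL;
            return -1;                      /* caller requeues the request */
        }
        return 0;
    }
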
drivers/md/dm-table.c
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+	return (table_type == DM_TYPE_REQUEST_BASED ||
+		table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
 		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
-		if (live_md_type == DM_TYPE_REQUEST_BASED ||
-		    live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+		if (__table_type_request_based(live_md_type))
 			request_based = 1;
 		else
 			bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
 		}
 		t->type = DM_TYPE_MQ_REQUEST_BASED;
-	} else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+	} else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
 		/* inherit live MD type */
 		t->type = live_md_type;
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-	unsigned table_type = dm_table_get_type(t);
-
-	return (table_type == DM_TYPE_REQUEST_BASED ||
-		table_type == DM_TYPE_MQ_REQUEST_BASED);
+	return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
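
The new __table_type_request_based() helper folds the two request-based flavours into one predicate, which the "0 path multipath" reload fix depends on: with an empty device list the table type cannot be derived from member devices, so it must be inherited from the live device for either flavour, including DM_TYPE_MQ_REQUEST_BASED. A compact sketch of that decision (enum values as in 4.1-era include/linux/device-mapper.h; choose_type() is illustrative):

    #include <stdbool.h>

    enum {
        DM_TYPE_NONE             = 0,
        DM_TYPE_BIO_BASED        = 1,
        DM_TYPE_REQUEST_BASED    = 2,
        DM_TYPE_MQ_REQUEST_BASED = 3,
    };

    static bool __table_type_request_based(unsigned table_type)
    {
        return (table_type == DM_TYPE_REQUEST_BASED ||
                table_type == DM_TYPE_MQ_REQUEST_BASED);
    }

    /* Illustrative: inherit the live type for a device-less table. Per the
     * shortlog, the old condition could leave a 0-path table reloaded onto
     * a blk-mq device without a usable type, failing the reload. */
    static unsigned choose_type(bool devices_empty, unsigned live_md_type)
    {
        if (devices_empty && __table_type_request_based(live_md_type))
            return live_md_type;
        return DM_TYPE_NONE;    /* fall through to normal detection */
    }
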
drivers/md/dm.c
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 
-	WARN_ON_ONCE(must_be_mapped && !clone->q);
-
 	blk_rq_unprep_clone(clone);
 
 	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
 			rq->sense_len = clone->sense_len;
 	}
 
-	free_rq_clone(clone, true);
+	free_rq_clone(clone);
 	if (!rq->q->mq_ops)
 		blk_end_request_all(rq, error);
 	else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
 	}
 
 	if (clone)
-		free_rq_clone(clone, false);
+		free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, rq);
+	blk_run_queue_async(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
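
Why blk_run_queue_async() here: blk_requeue_request() only puts the request back on the queue, so without a kick the requeued request can sit until unrelated I/O runs the queue again, which is the stall the "run queue on re-queue" fix addresses. The async variant is the right call under q->queue_lock; roughly what it did in this era's block/blk-core.c (paraphrased for reference, not part of this diff):

    void blk_run_queue_async(struct request_queue *q)
    {
        /* No inline dispatch: schedule the queue's delay work on the
         * kblockd workqueue with zero delay, which is why this is safe
         * to call with q->queue_lock held. */
        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
            mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
    }
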
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
-	sector_t max_sectors;
-	int max_size = 0;
+	sector_t max_sectors, max_size = 0;
 
 	if (unlikely(!map))
 		goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
 			  (sector_t) queue_max_sectors(q));
 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-	if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-		max_size = 0;
+
+	/*
+	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+	 * to the targets' merge function since it holds sectors not bytes).
+	 * Just doing this as an interim fix for stable@ because the more
+	 * comprehensive cleanup of switching to sector_t will impact every
+	 * DM target that implements a ->merge hook.
+	 */
+	if (max_size > INT_MAX)
+		max_size = INT_MAX;
 
 	/*
 	 * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
 	 * max is precomputed maximal io size
 	 */
 	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, max_size);
+		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
 	/*
 	 * If the target doesn't support merge method and some of the devices
 	 * provided their merge_bvec method (we know this by looking for the
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		dm_kill_unmapped_request(rq, r);
 		return r;
 	}
-	if (IS_ERR(clone))
-		return DM_MAPIO_REQUEUE;
+	if (r != DM_MAPIO_REMAPPED)
+		return r;
 	if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
 		/* -ENOMEM */
 		ti->type->release_clone_rq(clone);
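
This map_request() hunk closes the "NULL pointer when clone_and_map_rq returns !DM_MAPIO_REMAPPED" hole: only DM_MAPIO_REMAPPED guarantees the target produced a clone, so testing IS_ERR(clone) after, say, DM_MAPIO_REQUEUE inspected a pointer the target never set (IS_ERR(NULL) is false, letting a NULL clone reach setup_clone()). For reference, the map-return contract, with values as in this era's include/linux/device-mapper.h:

    #define DM_ENDIO_REQUEUE   2    /* the end_io return code it aliases */

    #define DM_MAPIO_SUBMITTED 0    /* target already dispatched the I/O */
    #define DM_MAPIO_REMAPPED  1    /* clone is valid and may be issued */
    #define DM_MAPIO_REQUEUE   DM_ENDIO_REQUEUE /* no clone; retry the rq later */

    /* Illustrative caller-side rule matching the hunk above: anything other
     * than DM_MAPIO_REMAPPED must be returned as-is, because no clone
     * exists to set up. */
    static int clone_is_usable(int r)
    {
        return r == DM_MAPIO_REMAPPED;
    }
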
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
 		/* clone request is allocated at the end of the pdu */
 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-		if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-			return BLK_MQ_RQ_QUEUE_BUSY;
+		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
 		queue_kthread_work(&md->kworker, &tio->work);
 	} else {
 		/* Direct call is fine since .queue_rq allows allocations */
-		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-			dm_requeue_unmapped_original_request(md, rq);
+		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+			/* Undo dm_start_request() before requeuing */
+			rq_completed(md, rq_data_dir(rq), false);
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		}
 	}
 
 	return BLK_MQ_RQ_QUEUE_OK;
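
Taken together, the dm_mq_queue_rq() changes line up with the blk-mq contract: on a transient failure, .queue_rq returns BLK_MQ_RQ_QUEUE_BUSY so the block layer itself re-queues and later retries the request, and anything the driver already did to the request (here, dm_start_request()'s accounting and references) must be unwound first. A minimal sketch of that contract, with hypothetical helper names:

    #include <linux/blk-mq.h>

    static bool example_try_dispatch(struct request *rq);   /* hypothetical */
    static void example_unwind_start(struct request *rq);   /* hypothetical */

    /* Hypothetical .queue_rq: BLK_MQ_RQ_QUEUE_BUSY hands the request back
     * to blk-mq for a later retry, which is why dm_mq_queue_rq() now calls
     * rq_completed() to undo dm_start_request() before returning it. */
    static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
    {
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);

        if (!example_try_dispatch(rq)) {
            example_unwind_start(rq);       /* undo start accounting */
            return BLK_MQ_RQ_QUEUE_BUSY;    /* blk-mq requeues rq */
        }
        return BLK_MQ_RQ_QUEUE_OK;
    }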