Commit ee1dfad5 authored by Mike Snitzer

dm: fix bio splitting and its bio completion order for regular IO

dm_queue_split() is removed because __split_and_process_bio() _must_
handle splitting bios to ensure proper bio submission and completion
ordering as a bio is split.

Otherwise, multiple recursive calls to ->submit_bio will cause multiple
split bios to be allocated from the same ->bio_split mempool at the same
time. This would result in deadlock in low memory conditions because no
progress could be made (only one bio is available in ->bio_split
mempool).
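
For context, this is roughly the splitting loop in __split_and_process_bio()
(an abridged sketch based on drivers/md/dm.c of this era; accounting, error
handling and the flush path are omitted). At most one bio is carved from
->bio_split per ->submit_bio invocation, and the remainder is re-queued via
submit_bio_noacct() so that it is processed, and completes, after the portion
already submitted:

	while (ci.sector_count && !error) {
		error = __split_and_process_non_flush(&ci);
		if (current->bio_list && ci.sector_count && !error) {
			/*
			 * The remainder must be handed back to
			 * submit_bio_noacct() so it is handled *after*
			 * bios already submitted have been completely
			 * processed, preserving completion order.
			 */
			struct bio *b = bio_split(bio,
					bio_sectors(bio) - ci.sector_count,
					GFP_NOIO, &md->queue->bio_split);
			ci.io->orig_bio = b;
			bio_chain(b, bio);
			trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
			ret = submit_bio_noacct(bio);
			break;
		}
	}

Because recursion is flattened onto current->bio_list and each level holds
only one ->bio_split allocation at a time, forward progress is possible even
when the mempool is down to its single reserved bio.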

This fix has been verified to preserve the performance gains, achieved by
avoiding excess splitting, that commit 120c9257 provided.

Fixes: 120c9257 ("Revert "dm: always call blk_queue_split() in dm_process_bio()"")
Cc: stable@vger.kernel.org # 5.0+, requires custom backport due to 5.9 changes
Reported-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent beaeb4f3
@@ -1724,23 +1724,6 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
 	return ret;
 }
 
-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
-{
-	unsigned len, sector_count;
-
-	sector_count = bio_sectors(*bio);
-	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
-
-	if (sector_count > len) {
-		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
-
-		bio_chain(split, *bio);
-		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
-		submit_bio_noacct(*bio);
-		*bio = split;
-	}
-}
-
 static blk_qc_t dm_process_bio(struct mapped_device *md,
 			       struct dm_table *map, struct bio *bio)
 {
@@ -1768,13 +1751,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
 	if (current->bio_list) {
 		if (is_abnormal_io(bio))
 			blk_queue_split(&bio);
-		else
-			dm_queue_split(md, ti, &bio);
+		/* regular IO is split by __split_and_process_bio */
 	}
 
 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
 		return __process_bio(md, map, bio, ti);
-	else
-		return __split_and_process_bio(md, map, bio);
+	return __split_and_process_bio(md, map, bio);
 }