Commit 90add6d4 authored by Linus Torvalds

Merge tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM core's bioset initialization so that the blk integrity pool is
   properly set up. Remove the now-unused bioset_init_from_src() (a hedged
   sketch of the new allocation pattern follows the commit summary below).

 - Fix a DM zoned hang caused by a locking imbalance due to a needless
   check in clone_endio().

* tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix zoned locking imbalance due to needless check in clone_endio
  block: remove bioset_init_from_src
  dm: fix bio_set allocation
parents 045fb9c2 dddf3056
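
Context for the bio_set fix: bioset_init_from_src() only copied a source
bio_set's pool size, front_pad and flags (see the removed block/bio.c code
below), so the blk integrity mempool of the biosets pre-allocated at
table-load time never made it into the mapped_device. The fix initializes the
table's dm_md_mempools biosets directly and hands the whole structure over to
the mapped_device in __bind(), instead of re-initializing md->bs and md->io_bs
from them. The sketch below only illustrates that allocation pattern; the
helper name, pool sizes and flag choices are assumptions for illustration,
not the literal code added to dm-table.c.

/*
 * Hedged sketch only: allocate a dm_md_mempools with its biosets (and,
 * when needed, their integrity pools) initialized in place, so nothing
 * has to be copied with bioset_init_from_src() afterwards.  The helper
 * name, pool sizes and flags are hypothetical.
 */
#include <linux/bio.h>
#include <linux/slab.h>
#include "dm-core.h"    /* struct dm_md_mempools { struct bio_set bs, io_bs; }; */

static struct dm_md_mempools *example_alloc_md_mempools(unsigned int pool_size,
                                                        unsigned int front_pad,
                                                        unsigned int io_front_pad,
                                                        bool integrity)
{
        struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);

        if (!pools)
                return NULL;

        /* bioset used for dm_io clones; per-cpu cache helps bio-based I/O */
        if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
                        BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
                goto free_pools;

        /* bioset used for further clones issued to targets */
        if (bioset_init(&pools->bs, pool_size, front_pad, BIOSET_NEED_BVECS))
                goto exit_io_bs;

        /* integrity pools can now be attached at allocation time as well */
        if (integrity &&
            (bioset_integrity_create(&pools->io_bs, pool_size) ||
             bioset_integrity_create(&pools->bs, pool_size)))
                goto exit_bs;

        return pools;

exit_bs:
        bioset_exit(&pools->bs);
exit_io_bs:
        bioset_exit(&pools->io_bs);
free_pools:
        kfree(pools);
        return NULL;
}

Because the mapped_device now owns the mempools as a unit, teardown in
cleanup_mapped_device() reduces to a single dm_free_md_mempools(md->mempools)
call, as the dm.c hunks below show.
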
--- a/block/bio.c
+++ b/block/bio.c
@@ -1747,26 +1747,6 @@ int bioset_init(struct bio_set *bs,
 }
 EXPORT_SYMBOL(bioset_init);
 
-/*
- * Initialize and setup a new bio_set, based on the settings from
- * another bio_set.
- */
-int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
-{
-        int flags;
-
-        flags = 0;
-        if (src->bvec_pool.min_nr)
-                flags |= BIOSET_NEED_BVECS;
-        if (src->rescue_workqueue)
-                flags |= BIOSET_NEED_RESCUER;
-        if (src->cache)
-                flags |= BIOSET_PERCPU_CACHE;
-
-        return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
-}
-EXPORT_SYMBOL(bioset_init_from_src);
-
 static int __init init_bio(void)
 {
         int i;

--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -33,6 +33,14 @@ struct dm_kobject_holder {
  * access their members!
  */
 
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+        struct bio_set bs;
+        struct bio_set io_bs;
+};
+
 struct mapped_device {
         struct mutex suspend_lock;
 
@@ -110,8 +118,7 @@ struct mapped_device {
         /*
          * io objects are allocated from here.
          */
-        struct bio_set io_bs;
-        struct bio_set bs;
+        struct dm_md_mempools *mempools;
 
         /* kobject and completion */
         struct dm_kobject_holder kobj_holder;

--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
 {
         int r;
 
-        r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
+        r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
                               dm_rq_bio_constructor, tio);
         if (r)
                 return r;

--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
         return 0;
 }
 
-void dm_table_free_md_mempools(struct dm_table *t)
-{
-        dm_free_md_mempools(t->mempools);
-        t->mempools = NULL;
-}
-
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
-{
-        return t->mempools;
-}
-
 static int setup_indexes(struct dm_table *t)
 {
         int i;

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -136,14 +136,6 @@ static int get_swap_bios(void)
         return latch;
 }
 
-/*
- * For mempools pre-allocation at the table loading time.
- */
-struct dm_md_mempools {
-        struct bio_set bs;
-        struct bio_set io_bs;
-};
-
 struct table_device {
         struct list_head list;
         refcount_t count;
@@ -581,7 +573,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
         struct dm_target_io *tio;
         struct bio *clone;
 
-        clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
+        clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
         /* Set default bdev, but target must bio_set_dev() before issuing IO */
         clone->bi_bdev = md->disk->part0;
 
@@ -628,7 +620,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
         } else {
                 struct mapped_device *md = ci->io->md;
 
-                clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
+                clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
+                                        &md->mempools->bs);
                 if (!clone)
                         return NULL;
                 /* Set default bdev, but target must bio_set_dev() before issuing IO */
@@ -1023,22 +1016,18 @@ static void clone_endio(struct bio *bio)
         struct dm_io *io = tio->io;
         struct mapped_device *md = io->md;
 
-        if (likely(bio->bi_bdev != md->disk->part0)) {
-                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
-                if (unlikely(error == BLK_STS_TARGET)) {
-                        if (bio_op(bio) == REQ_OP_DISCARD &&
-                            !bdev_max_discard_sectors(bio->bi_bdev))
-                                disable_discard(md);
-                        else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                                 !q->limits.max_write_zeroes_sectors)
-                                disable_write_zeroes(md);
-                }
-
-                if (static_branch_unlikely(&zoned_enabled) &&
-                    unlikely(blk_queue_is_zoned(q)))
-                        dm_zone_endio(io, bio);
+        if (unlikely(error == BLK_STS_TARGET)) {
+                if (bio_op(bio) == REQ_OP_DISCARD &&
+                    !bdev_max_discard_sectors(bio->bi_bdev))
+                        disable_discard(md);
+                else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                         !bdev_write_zeroes_sectors(bio->bi_bdev))
+                        disable_write_zeroes(md);
         }
 
+        if (static_branch_unlikely(&zoned_enabled) &&
+            unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+                dm_zone_endio(io, bio);
+
         if (endio) {
                 int r = endio(ti, bio, &error);
@@ -1876,8 +1865,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 {
         if (md->wq)
                 destroy_workqueue(md->wq);
-        bioset_exit(&md->bs);
-        bioset_exit(&md->io_bs);
+        dm_free_md_mempools(md->mempools);
 
         if (md->dax_dev) {
                 dax_remove_host(md->disk);
@@ -2049,48 +2037,6 @@ static void free_dev(struct mapped_device *md)
         kvfree(md);
 }
 
-static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
-{
-        struct dm_md_mempools *p = dm_table_get_md_mempools(t);
-        int ret = 0;
-
-        if (dm_table_bio_based(t)) {
-                /*
-                 * The md may already have mempools that need changing.
-                 * If so, reload bioset because front_pad may have changed
-                 * because a different table was loaded.
-                 */
-                bioset_exit(&md->bs);
-                bioset_exit(&md->io_bs);
-
-        } else if (bioset_initialized(&md->bs)) {
-                /*
-                 * There's no need to reload with request-based dm
-                 * because the size of front_pad doesn't change.
-                 * Note for future: If you are to reload bioset,
-                 * prep-ed requests in the queue may refer
-                 * to bio from the old bioset, so you must walk
-                 * through the queue to unprep.
-                 */
-                goto out;
-        }
-
-        BUG_ON(!p ||
-               bioset_initialized(&md->bs) ||
-               bioset_initialized(&md->io_bs));
-
-        ret = bioset_init_from_src(&md->bs, &p->bs);
-        if (ret)
-                goto out;
-        ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
-        if (ret)
-                bioset_exit(&md->bs);
-out:
-        /* mempool bind completed, no longer need any mempools in the table */
-        dm_table_free_md_mempools(t);
-        return ret;
-}
-
 /*
  * Bind a table to the device.
  */
@@ -2144,12 +2090,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
                  * immutable singletons - used to optimize dm_mq_queue_rq.
                  */
                 md->immutable_target = dm_table_get_immutable_target(t);
-        }
 
-        ret = __bind_mempools(md, t);
-        if (ret) {
-                old_map = ERR_PTR(ret);
-                goto out;
+                /*
+                 * There is no need to reload with request-based dm because the
+                 * size of front_pad doesn't change.
+                 *
+                 * Note for future: If you are to reload bioset, prep-ed
+                 * requests in the queue may refer to bio from the old bioset,
+                 * so you must walk through the queue to unprep.
+                 */
+                if (!md->mempools) {
+                        md->mempools = t->mempools;
+                        t->mempools = NULL;
+                }
+        } else {
+                /*
+                 * The md may already have mempools that need changing.
+                 * If so, reload bioset because front_pad may have changed
+                 * because a different table was loaded.
+                 */
+                dm_free_md_mempools(md->mempools);
+                md->mempools = t->mempools;
+                t->mempools = NULL;
         }
 
         ret = dm_table_set_restrictions(t, md->queue, limits);

--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
-void dm_table_free_md_mempools(struct dm_table *t);
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);

--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -403,7 +403,6 @@ enum {
 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
 extern void bioset_exit(struct bio_set *);
 extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                 unsigned int opf, gfp_t gfp_mask,