Commit cfc97abc authored by Mike Snitzer

dm: conditionally enable BIOSET_PERCPU_CACHE for dm_io bioset

A bioset's per-cpu alloc cache may have broader utility in the future
but for now constrain it to being tightly coupled to QUEUE_FLAG_POLL.
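
For illustration, the gating boils down to choosing the bioset_init() flags at
initialization time. A minimal sketch (hypothetical helper, not part of this
patch; bioset_init(), BIOSET_PERCPU_CACHE and QUEUE_FLAG_POLL are the real
kernel symbols being used):

	/* request the per-cpu bio alloc cache only if the queue can poll */
	static int init_polled_bioset(struct bio_set *bs, struct request_queue *q,
				      unsigned int pool_size, unsigned int front_pad)
	{
		int flags = test_bit(QUEUE_FLAG_POLL, &q->queue_flags) ?
			BIOSET_PERCPU_CACHE : 0;

		return bioset_init(bs, pool_size, front_pad, flags);
	}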

Also change dm_io_complete() to use bio_clear_polled() so that it
properly clears all associated bio state on requeue.
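
For reference, bio_clear_polled() strips more than the REQ_POLLED op flag: it
also drops the per-cpu-cache state tied to polled allocation, which the old
open-coded "bio->bi_opf &= ~REQ_POLLED" missed. Paraphrased from the bio
helpers of this era (the exact body varies by kernel version):

	static inline void bio_clear_polled(struct bio *bio)
	{
		/* can't support the per-cpu alloc cache if polling is turned off */
		bio_clear_flag(bio, BIO_PERCPU_CACHE);
		bio->bi_opf &= ~REQ_POLLED;
	}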

This commit improves DM's hipri bio polling (REQ_POLLED) performance by
7-20%, depending on the system.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 069adbac
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1002,6 +1002,8 @@ bool dm_table_request_based(struct dm_table *t)
 	return __table_type_request_based(dm_table_get_type(t));
 }
 
+static int dm_table_supports_poll(struct dm_table *t);
+
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
 	enum dm_queue_mode type = dm_table_get_type(t);
@@ -1009,21 +1011,24 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 	unsigned min_pool_size = 0;
 	struct dm_target *ti;
 	unsigned i;
+	bool poll_supported = false;
 
 	if (unlikely(type == DM_TYPE_NONE)) {
 		DMWARN("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
-	if (__table_type_bio_based(type))
+	if (__table_type_bio_based(type)) {
 		for (i = 0; i < t->num_targets; i++) {
 			ti = t->targets + i;
 			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
 			min_pool_size = max(min_pool_size, ti->num_flush_bios);
 		}
+		poll_supported = !!dm_table_supports_poll(t);
+	}
 
-	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
-					   per_io_data_size, min_pool_size);
+	t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
+					   t->integrity_supported, poll_supported);
 	if (!t->mempools)
 		return -ENOMEM;
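
The forward-declared dm_table_supports_poll() is the helper DM's existing
bio-polling support already provides; it reports whether every underlying data
device has QUEUE_FLAG_POLL set. Roughly, paraphrased under the assumption that
it is built on the dm_table_any_dev_attr() iterator (not part of this patch):

	static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		/* "any dev attr" semantics: return true for a non-pollable device */
		return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
	}

	static int dm_table_supports_poll(struct dm_table *t)
	{
		/* polling is supported only if no device lacks QUEUE_FLAG_POLL */
		return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
	}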
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,7 +899,7 @@ static void dm_io_complete(struct dm_io *io)
 		 * may only reflect a subset of the pre-split original)
 		 * so clear REQ_POLLED in case of requeue.
 		 */
-		bio->bi_opf &= ~REQ_POLLED;
+		bio_clear_polled(bio);
 		if (io_error == BLK_STS_AGAIN) {
 			/* io_uring doesn't handle BLK_STS_AGAIN (yet) */
 			queue_io(md, bio);
@@ -2901,8 +2901,8 @@ int dm_noflush_suspending(struct dm_target *ti)
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-					    unsigned integrity, unsigned per_io_data_size,
-					    unsigned min_pool_size)
+					    unsigned per_io_data_size, unsigned min_pool_size,
+					    bool integrity, bool poll)
 {
 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
 	unsigned int pool_size = 0;
@@ -2918,7 +2918,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
 		io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
-		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
+		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
 		if (ret)
 			goto out;
 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
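
As an aside on the front_pad arithmetic above: the pad reserves room for DM's
per-io bookkeeping in front of every bio allocated from the bioset, so the
embedded clone bio can be walked back to its containing structure. A
standalone, userspace-style illustration of that layout trick (hypothetical
names, not kernel code):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct bio_like { int dummy; };		/* stands in for struct bio */

	struct io_like {			/* stands in for struct dm_io */
		long per_io_data;		/* stands in for per_io_data_size bytes */
		struct bio_like clone;		/* the embedded "bio" handed to callers */
	};

	int main(void)
	{
		/* analogous to the DM_IO_BIO_OFFSET component of io_front_pad */
		size_t front_pad = offsetof(struct io_like, clone);
		struct io_like *io = malloc(sizeof(*io));
		struct bio_like *bio;

		if (!io)
			return 1;
		bio = &io->clone;

		/* recover the container the way DM does via its front-pad offsets */
		struct io_like *recovered =
			(struct io_like *)((char *)bio - front_pad);
		printf("front_pad=%zu recovered==io: %d\n", front_pad,
		       recovered == io);
		free(io);
		return 0;
	}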
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -221,8 +221,8 @@ void dm_kcopyd_exit(void);
  * Mempool operations
  */
 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-					    unsigned integrity, unsigned per_bio_data_size,
-					    unsigned min_pool_size);
+					    unsigned per_io_data_size, unsigned min_pool_size,
+					    bool integrity, bool poll);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*