Commit 442761fd authored by Mike Snitzer

dm: conditionally enable branching for less used features

Use jump_labels to further reduce the cost of unlikely branches for
zoned block devices, dm-stats and swap_bios throttling.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 563a225c
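[Note: the sketch below illustrates the jump_label pattern this patch applies; the feature_enabled key and the helpers around it are made-up names for illustration, not code from the patch. A key defined with DEFINE_STATIC_KEY_FALSE() compiles its guarded branch down to a no-op jump until static_branch_enable() patches the code at runtime, so hot paths pay nothing on systems that never configure the feature.]

/*
 * Illustrative sketch only: feature_enabled, feature_configure(),
 * feature_applies() and handle_feature() are hypothetical names.
 * The jump_label APIs themselves are the real kernel interfaces.
 */
#include <linux/bio.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(feature_enabled);

/* Stand-ins for a feature's per-bio test and handling. */
static bool feature_applies(struct bio *bio)
{
	return false;	/* stub */
}

static void handle_feature(struct bio *bio)
{
}

/* Slow path: flip the key once, the first time the feature is set up. */
static void feature_configure(void)
{
	if (!static_key_enabled(&feature_enabled.key))
		static_branch_enable(&feature_enabled);
}

/* Hot path: a no-op jump until the key is enabled, then the real test. */
static void feature_io_path(struct bio *bio)
{
	if (static_branch_unlikely(&feature_enabled) &&
	    unlikely(feature_applies(bio)))
		handle_feature(bio);
}

The existing unlikely() tests stay behind each new static_branch_unlikely() guard because a key only records that some table has ever enabled the feature, not that it applies to this particular device or bio. Note also that the patch never disables a key: once a stats region, a swap_bios-limited target or a zoned table has existed, the corresponding branches stay patched in.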
drivers/md/dm-core.h
@@ -13,6 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-crypto-profile.h>
+#include <linux/jump_label.h>
 
 #include <trace/events/block.h>
@@ -154,6 +155,10 @@ static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
 	return &md->stats;
 }
 
+DECLARE_STATIC_KEY_FALSE(stats_enabled);
+DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
+DECLARE_STATIC_KEY_FALSE(zoned_enabled);
+
 static inline bool dm_emulate_zone_append(struct mapped_device *md)
 {
 	if (blk_queue_is_zoned(md->queue))
drivers/md/dm-stats.c
@@ -396,6 +396,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	dm_stats_recalc_precise_timestamps(stats);
 
+	if (!static_key_enabled(&stats_enabled.key))
+		static_branch_enable(&stats_enabled);
+
 	mutex_unlock(&stats->mutex);
 
 	resume_callback(md);
drivers/md/dm-table.c
@@ -719,6 +719,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
 		       dm_device_name(t->md), type);
 
+	if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+		static_branch_enable(&swap_bios_enabled);
+
 	return 0;
 
  bad:
@@ -2040,6 +2043,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		r = dm_set_zones_restrictions(t, q);
 		if (r)
 			return r;
+		if (!static_key_enabled(&zoned_enabled.key))
+			static_branch_enable(&zoned_enabled);
 	}
 
 	dm_update_crypto_profile(q, t);
drivers/md/dm.c
@@ -71,6 +71,10 @@ void dm_issue_global_event(void)
 	wake_up(&dm_global_eventq);
 }
 
+DEFINE_STATIC_KEY_FALSE(stats_enabled);
+DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
+DEFINE_STATIC_KEY_FALSE(zoned_enabled);
+
 /*
  * One of these is allocated (on-stack) per original bio.
  */
@@ -516,7 +520,8 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
 	else
 		bio_end_io_acct(bio, start_time);
 
-	if (unlikely(dm_stats_used(&md->stats)))
+	if (static_branch_unlikely(&stats_enabled) &&
+	    unlikely(dm_stats_used(&md->stats)))
 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
 				    bio->bi_iter.bi_sector, bio_sectors(bio),
 				    end, start_time, stats_aux);
@@ -586,7 +591,8 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	io->start_time = jiffies;
 	io->flags = 0;
 
-	dm_stats_record_start(&md->stats, &io->stats_aux);
+	if (static_branch_unlikely(&stats_enabled))
+		dm_stats_record_start(&md->stats, &io->stats_aux);
 
 	return io;
 }
@@ -1012,21 +1018,25 @@ static void clone_endio(struct bio *bio)
 			disable_write_zeroes(md);
 	}
 
-	if (unlikely(blk_queue_is_zoned(q)))
+	if (static_branch_unlikely(&zoned_enabled) &&
+	    unlikely(blk_queue_is_zoned(q)))
 		dm_zone_endio(io, bio);
 
 	if (endio) {
 		int r = endio(ti, bio, &error);
 		switch (r) {
 		case DM_ENDIO_REQUEUE:
-			/*
-			 * Requeuing writes to a sequential zone of a zoned
-			 * target will break the sequential write pattern:
-			 * fail such IO.
-			 */
-			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
-				error = BLK_STS_IOERR;
-			else
-				error = BLK_STS_DM_REQUEUE;
+			if (static_branch_unlikely(&zoned_enabled)) {
+				/*
+				 * Requeuing writes to a sequential zone of a zoned
+				 * target will break the sequential write pattern:
+				 * fail such IO.
+				 */
+				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
+					error = BLK_STS_IOERR;
+				else
+					error = BLK_STS_DM_REQUEUE;
+			} else
+				error = BLK_STS_DM_REQUEUE;
 			fallthrough;
 		case DM_ENDIO_DONE:
@@ -1040,7 +1050,8 @@ static void clone_endio(struct bio *bio)
 		}
 	}
 
-	if (unlikely(swap_bios_limit(ti, bio)))
+	if (static_branch_unlikely(&swap_bios_enabled) &&
+	    unlikely(swap_bios_limit(ti, bio)))
 		up(&md->swap_bios_semaphore);
 
 	free_tio(bio);
@@ -1295,21 +1306,25 @@ static void __map_bio(struct bio *clone)
 	dm_io_inc_pending(io);
 	tio->old_sector = clone->bi_iter.bi_sector;
 
-	if (unlikely(swap_bios_limit(ti, clone))) {
+	if (static_branch_unlikely(&swap_bios_enabled) &&
+	    unlikely(swap_bios_limit(ti, clone))) {
 		int latch = get_swap_bios();
 		if (unlikely(latch != md->swap_bios))
 			__set_swap_bios_limit(md, latch);
 		down(&md->swap_bios_semaphore);
 	}
 
-	/*
-	 * Check if the IO needs a special mapping due to zone append emulation
-	 * on zoned target. In this case, dm_zone_map_bio() calls the target
-	 * map operation.
-	 */
-	if (unlikely(dm_emulate_zone_append(md)))
-		r = dm_zone_map_bio(tio);
-	else
+	if (static_branch_unlikely(&zoned_enabled)) {
+		/*
+		 * Check if the IO needs a special mapping due to zone append
+		 * emulation on zoned target. In this case, dm_zone_map_bio()
+		 * calls the target map operation.
+		 */
+		if (unlikely(dm_emulate_zone_append(md)))
+			r = dm_zone_map_bio(tio);
+		else
+			r = ti->type->map(ti, clone);
+	} else
 		r = ti->type->map(ti, clone);
 
 	switch (r) {
@@ -1329,7 +1344,8 @@ static void __map_bio(struct bio *clone)
 		break;
 	case DM_MAPIO_KILL:
 	case DM_MAPIO_REQUEUE:
-		if (unlikely(swap_bios_limit(ti, clone)))
+		if (static_branch_unlikely(&swap_bios_enabled) &&
+		    unlikely(swap_bios_limit(ti, clone)))
 			up(&md->swap_bios_semaphore);
 		free_tio(clone);
 		if (r == DM_MAPIO_KILL)
@@ -1565,7 +1581,8 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector_count = bio_sectors(bio);
 
 	/* Shouldn't happen but sector_count was being set to 0 so... */
-	if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+	if (static_branch_unlikely(&zoned_enabled) &&
+	    WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
 		ci->sector_count = 0;
 }