Commit f0635b8a authored by Jens Axboe

bfq: calculate shallow depths at init time

It doesn't change, so don't put it in the per-IO hot path.
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 55141366
@@ -486,46 +486,6 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
 	}
 }
 
-/*
- * See the comments on bfq_limit_depth for the purpose of
- * the depths set in the function.
- */
-static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
-{
-	bfqd->sb_shift = bt->sb.shift;
-
-	/*
-	 * In-word depths if no bfq_queue is being weight-raised:
-	 * leaving 25% of tags only for sync reads.
-	 *
-	 * In next formulas, right-shift the value
-	 * (1U<<bfqd->sb_shift), instead of computing directly
-	 * (1U<<(bfqd->sb_shift - something)), to be robust against
-	 * any possible value of bfqd->sb_shift, without having to
-	 * limit 'something'.
-	 */
-	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
-	/*
-	 * no more than 75% of tags for sync writes (25% extra tags
-	 * w.r.t. async I/O, to prevent async I/O from starving sync
-	 * writes)
-	 */
-	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
-
-	/*
-	 * In-word depths in case some bfq_queue is being weight-
-	 * raised: leaving ~63% of tags for sync reads. This is the
-	 * highest percentage for which, in our tests, application
-	 * start-up times didn't suffer from any regression due to tag
-	 * shortage.
-	 */
-	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
-	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
-}
-
 /*
  * Async I/O can easily starve sync I/O (both sync reads and sync
  * writes), by consuming all tags. Similarly, storms of sync writes,
@@ -535,18 +495,11 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
  */
 static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 {
-	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 	struct bfq_data *bfqd = data->q->elevator->elevator_data;
-	struct sbitmap_queue *bt;
 
 	if (op_is_sync(op) && !op_is_write(op))
 		return;
 
-	bt = &tags->bitmap_tags;
-
-	if (unlikely(bfqd->sb_shift != bt->sb.shift))
-		bfq_update_depths(bfqd, bt);
-
 	data->shallow_depth =
 		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
@@ -5126,6 +5079,55 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
 	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
 }
 
+/*
+ * See the comments on bfq_limit_depth for the purpose of
+ * the depths set in the function.
+ */
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+{
+	bfqd->sb_shift = bt->sb.shift;
+
+	/*
+	 * In-word depths if no bfq_queue is being weight-raised:
+	 * leaving 25% of tags only for sync reads.
+	 *
+	 * In next formulas, right-shift the value
+	 * (1U<<bfqd->sb_shift), instead of computing directly
+	 * (1U<<(bfqd->sb_shift - something)), to be robust against
+	 * any possible value of bfqd->sb_shift, without having to
+	 * limit 'something'.
+	 */
+	/* no more than 50% of tags for async I/O */
+	bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+	/*
+	 * no more than 75% of tags for sync writes (25% extra tags
+	 * w.r.t. async I/O, to prevent async I/O from starving sync
+	 * writes)
+	 */
+	bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+
+	/*
+	 * In-word depths in case some bfq_queue is being weight-
+	 * raised: leaving ~63% of tags for sync reads. This is the
+	 * highest percentage for which, in our tests, application
+	 * start-up times didn't suffer from any regression due to tag
+	 * shortage.
+	 */
+	/* no more than ~18% of tags for async I/O */
+	bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+	/* no more than ~37% of tags for sync writes (~20% extra tags) */
+	bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+
+	bfq_update_depths(bfqd, &tags->bitmap_tags);
+	return 0;
+}
+
 static void bfq_exit_queue(struct elevator_queue *e)
 {
 	struct bfq_data *bfqd = e->elevator_data;
@@ -5547,6 +5549,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.requests_merged = bfq_requests_merged,
 		.request_merged = bfq_request_merged,
 		.has_work = bfq_has_work,
+		.init_hctx = bfq_init_hctx,
 		.init_sched = bfq_init_queue,
 		.exit_sched = bfq_exit_queue,
 	},
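
As a quick sanity check of the percentages quoted in the comments above, here is a stand-alone user-space sketch (not part of the patch) that replays the word_depths[][] formulas from bfq_update_depths() for an assumed shift of 6, i.e. a full 64-bit sbitmap word. For that assumed value it yields 32 and 48 tags without weight-raising (50% and 75% of 64) and 12 and 24 tags when some queue is weight-raised (~18% and ~37%). In the patch itself the shift is taken from hctx->sched_tags->bitmap_tags at bfq_init_hctx() time, so the values are computed once per hardware queue instead of on every request.

/*
 * Stand-alone sketch, not kernel code: evaluates the word_depths[][]
 * formulas from bfq_update_depths() for an assumed sbitmap shift of 6
 * (64 tags per sbitmap word).
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int shift = 6;	/* assumed value, for illustration only */
	unsigned int word_depths[2][2];

	/* no bfq_queue weight-raised: 50% async, 75% sync writes */
	word_depths[0][0] = MAX((1U << shift) >> 1, 1U);
	word_depths[0][1] = MAX(((1U << shift) * 3) >> 2, 1U);

	/* some bfq_queue weight-raised: ~18% async, ~37% sync writes */
	word_depths[1][0] = MAX(((1U << shift) * 3) >> 4, 1U);
	word_depths[1][1] = MAX(((1U << shift) * 6) >> 4, 1U);

	/* prints: word depth 64: no-wr async=32 sync-writes=48, wr async=12 sync-writes=24 */
	printf("word depth %u: no-wr async=%u sync-writes=%u, wr async=%u sync-writes=%u\n",
	       1U << shift,
	       word_depths[0][0], word_depths[0][1],
	       word_depths[1][0], word_depths[1][1]);
	return 0;
}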