Commit cd006509 authored by Tejun Heo, committed by Jens Axboe

blk-iocost: account for IO size when testing latencies

On each IO completion, iocost decides whether the IO met or missed its latency
target. Currently, the targets are fixed numbers per IO type. While this can be
good enough for loose latency targets way higher than typical completion
latencies, the effect of IO size makes it difficult to tighten the latency
target - a target adequate for 4k IOs might be too tight for 512k IOs and
vice-versa.

iocost already has all the information needed to account for different IO sizes
when testing whether the latency target is met, as it can calculate the size vtime
cost of a given IO. This patch updates the completion path to calculate the size
vtime cost of the IO, deduct the nsec equivalent from the observed latency, and use
the adjusted value to decide whether the target is met.

This makes latency targets independent from IO size and enables determining
adequate latency targets with fixed size fio runs.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andy Newell <newella@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 54c52e10
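
The completion-path change below boils down to converting a request's size-based vtime cost into nanoseconds and deducting it from the measured on-queue time before the comparison against the QoS latency target. Here is a minimal, self-contained sketch of that decision in plain C; the helper names (size_vtime_cost, latency_target_met) and all numeric values are illustrative stand-ins, not the kernel's own:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's constants: vtime advances at 2^37 units per second. */
#define VTIME_PER_SEC_SHIFT 37
#define VTIME_PER_SEC  (1ULL << VTIME_PER_SEC_SHIFT)
#define VTIME_PER_NSEC (VTIME_PER_SEC / 1000000000ULL)
#define NSEC_PER_USEC  1000ULL

/*
 * Hypothetical stand-in for calc_size_vtime_cost(): number of pages times a
 * per-page linear cost coefficient (the real code picks LCOEF_RPAGE or
 * LCOEF_WPAGE depending on the request direction).
 */
static uint64_t size_vtime_cost(uint64_t pages, uint64_t lcoef_page)
{
	return pages * lcoef_page;
}

/* Returns 1 if the IO counts as having met the latency target. */
static int latency_target_met(uint64_t on_q_ns, uint64_t size_cost_vtime,
			      uint64_t target_usec)
{
	uint64_t size_nsec = size_cost_vtime / VTIME_PER_NSEC;

	/* Same shape as the patched check: deduct the size component first. */
	return on_q_ns <= size_nsec ||
	       on_q_ns - size_nsec <= target_usec * NSEC_PER_USEC;
}

int main(void)
{
	/* Made-up numbers: 128 pages (512k) at ~30us of size cost per page,
	 * 5ms observed on-queue time, 2.5ms latency target. */
	uint64_t cost = size_vtime_cost(128, 30000ULL * VTIME_PER_NSEC);

	printf("met=%d\n", latency_target_met(5000000, cost, 2500));
	return 0;
}

With VTIME_PER_SEC = 2^37, VTIME_PER_NSEC works out to roughly 137 vtime units per nanosecond, so dividing the cost by it recovers the nanosecond equivalent that gets subtracted from the observed latency. Without that deduction, the example IO above (5ms observed against a 2.5ms target) would count as a miss.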
block/Kconfig
@@ -146,6 +146,7 @@ config BLK_CGROUP_IOLATENCY
 config BLK_CGROUP_IOCOST
 	bool "Enable support for cost model based cgroup IO controller"
 	depends on BLK_CGROUP=y
+	select BLK_RQ_IO_DATA_LEN
 	select BLK_RQ_ALLOC_TIME
 	---help---
 	Enabling this option enables the .weight interface for cost
block/blk-iocost.c
@@ -260,6 +260,7 @@ enum {
 	VTIME_PER_SEC_SHIFT	= 37,
 	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
 	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
+	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,
 
 	/* bound vrate adjustments within two orders of magnitude */
 	VRATE_MIN_PPM		= 10000,	/* 1% */
@@ -1668,6 +1669,31 @@ static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
 	return cost;
 }
 
+static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
+					 u64 *costp)
+{
+	unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
+
+	switch (req_op(rq)) {
+	case REQ_OP_READ:
+		*costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
+		break;
+	case REQ_OP_WRITE:
+		*costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
+		break;
+	default:
+		*costp = 0;
+	}
+}
+
+static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
+{
+	u64 cost;
+
+	calc_size_vtime_cost_builtin(rq, ioc, &cost);
+	return cost;
+}
+
 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 {
 	struct blkcg_gq *blkg = bio->bi_blkg;
@@ -1837,7 +1863,7 @@ static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
 {
 	struct ioc *ioc = rqos_to_ioc(rqos);
-	u64 on_q_ns, rq_wait_ns;
+	u64 on_q_ns, rq_wait_ns, size_nsec;
 	int pidx, rw;
 
 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
@@ -1858,8 +1884,10 @@ static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
 
 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
+	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
 
-	if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
+	if (on_q_ns <= size_nsec ||
+	    on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
 		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
 	else
 		this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);
@@ -2267,6 +2295,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
 
 	spin_lock_irq(&ioc->lock);
 	if (enable) {
+		blk_stat_enable_accounting(ioc->rqos.q);
 		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
 		ioc->enabled = true;
 	} else {