Commit e8368b57 authored by Yu Kuai, committed by Jens Axboe

blk-throttle: use calculate_io/bytes_allowed() for throtl_trim_slice()

There are no functional changes, just make the code cleaner.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230816012708.1193747-4-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bb8d5587
...@@ -697,11 +697,40 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw) ...@@ -697,11 +697,40 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true; return true;
} }
static unsigned int calculate_io_allowed(u32 iops_limit,
					 unsigned long jiffy_elapsed)
{
	u64 io = (u64)iops_limit * jiffy_elapsed;

	/*
	 * Number of I/Os allowed in jiffy_elapsed at iops_limit:
	 * iops_limit * jiffy_elapsed / HZ.  jiffy_elapsed stays small in
	 * practice: with a minimum iops of 1, dispatch is allowed after at
	 * most one second, after which the slice should have been trimmed,
	 * so the 64-bit product cannot overflow.
	 */
	do_div(io, HZ);

	/* Clamp to the unsigned int range callers expect. */
	return io > UINT_MAX ? UINT_MAX : (unsigned int)io;
}
static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
	/*
	 * Bytes allowed in jiffy_elapsed at bps_limit:
	 * bps_limit * jiffy_elapsed / HZ, computed without losing the
	 * high bits of the intermediate product.
	 */
	u64 bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed,
						(u64)HZ);

	return bytes_allowed;
}
/* Trim the used slices and adjust slice start accordingly */ /* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{ {
unsigned long nr_slices, time_elapsed, io_trim; unsigned long time_elapsed, io_trim;
u64 bytes_trim, tmp; u64 bytes_trim;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
...@@ -723,19 +752,14 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) ...@@ -723,19 +752,14 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
time_elapsed = jiffies - tg->slice_start[rw]; time_elapsed = rounddown(jiffies - tg->slice_start[rw],
tg->td->throtl_slice);
nr_slices = time_elapsed / tg->td->throtl_slice; if (!time_elapsed)
if (!nr_slices)
return; return;
tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
do_div(tmp, HZ);
bytes_trim = tmp;
io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
HZ;
bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
time_elapsed);
io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed);
if (!bytes_trim && !io_trim) if (!bytes_trim && !io_trim)
return; return;
...@@ -749,41 +773,13 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) ...@@ -749,41 +773,13 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
else else
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; tg->slice_start[rw] += time_elapsed;
throtl_log(&tg->service_queue, throtl_log(&tg->service_queue,
"[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu", "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim, rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
tg->slice_start[rw], tg->slice_end[rw], jiffies); bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
} jiffies);
static unsigned int calculate_io_allowed(u32 iops_limit,
unsigned long jiffy_elapsed)
{
unsigned int io_allowed;
u64 tmp;
/*
* jiffy_elapsed should not be a big value as minimum iops can be
* 1 then at max jiffy elapsed should be equivalent of 1 second as we
* will allow dispatch after 1 second and after that slice should
* have been trimmed.
*/
tmp = (u64)iops_limit * jiffy_elapsed;
do_div(tmp, HZ);
if (tmp > UINT_MAX)
io_allowed = UINT_MAX;
else
io_allowed = tmp;
return io_allowed;
}
static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
} }
static void __tg_update_carryover(struct throtl_grp *tg, bool rw) static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment