Commit 54c52e10 authored by Tejun Heo, committed by Jens Axboe

blk-iocost: switch to fixed non-auto-decaying use_delay

The use_delay mechanism was introduced by blk-iolatency to hold memory
allocators accountable for the reclaim and other shared IOs they cause. The
duration of the delay is balanced dynamically: iolatency increases the value
on each target miss, and the value auto-decays as time passes and threads get
delayed on it.
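
For intuition, here is a minimal sketch of that balance. The names and the
simplified halving decay are hypothetical stand-ins, not the kernel's actual
blkcg_scale_delay() logic:

	/* Illustrative only: iolatency-style balancing of the delay value. */
	struct delay_state {
		u64 delay_nsec;		/* current penalty owed by the cgroup */
		u64 last_decay_ns;	/* last time the penalty was decayed */
	};

	static void on_target_miss(struct delay_state *d, u64 penalty_ns)
	{
		d->delay_nsec += penalty_ns;	/* roughly what blkcg_add_delay() does */
	}

	static void maybe_decay(struct delay_state *d, u64 now_ns)
	{
		/* decay at most once per second as time passes */
		if (now_ns - d->last_decay_ns >= NSEC_PER_SEC) {
			d->delay_nsec /= 2;	/* stand-in for the real scaling */
			d->last_decay_ns = now_ns;
		}
	}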

While this works well for iolatency, iocost's control model isn't compatible
with it. There are no repeated "violation" events which can be balanced
against auto-decaying. iocost instead knows how much a given cgroup is over
budget and wants to prevent that cgroup from issuing IOs while it remains over
budget. Until now, iocost has been adding the cost of force-issued IOs to the
delay. However, this doesn't reflect the amount by which the cgroup is already
over budget, and it is simply not enough to counter the auto-decaying,
allowing an anon-memory-leaking low-priority cgroup to exceed its allotted
share of IOs.
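
As an illustration (numbers hypothetical): if a cgroup's vtime has run
10,000,000 vtime units ahead of the device's current vnow while vrate is
2,000 vtime units per microsecond, the cgroup is 10,000,000 / 2,000 = 5,000 us,
i.e. 5 ms, over budget, and 5 ms is exactly the delay iocost wants to impose
until the overage is paid back.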

As auto-decaying doesn't make sense for iocost, this patch introduces a
different mode of operation for use_delay: when blkcg_set_delay() is used
instead of blkcg_add/use_delay(), the delay duration is not auto-decayed until
it is explicitly cleared with blkcg_clear_delay(). iocost is updated to keep
the delay duration synchronized with the budget overage amount.
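
A minimal sketch of the resulting contract, modeled on the iocost changes
below (police_overage() is a hypothetical caller, not kernel code):

	/* Keep the fixed delay synchronized with the current budget overage. */
	static void police_overage(struct blkcg_gq *blkg, u64 vtime, u64 vnow,
				   u64 vrate)
	{
		if (vtime > vnow) {
			/* wall-clock time needed to pay back the vtime overage */
			u64 delay_ns = DIV64_U64_ROUND_UP(vtime - vnow, vrate) *
				NSEC_PER_USEC;

			blkcg_set_delay(blkg, delay_ns);	/* fixed, no auto-decay */
		} else {
			blkcg_clear_delay(blkg);	/* must be cleared explicitly */
		}
	}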

With this change, iocost can effectively police cgroups which generate a
significant amount of force-issued IOs.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent accea322
block/blk-cgroup.c
@@ -1530,6 +1530,10 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
 {
 	u64 old = atomic64_read(&blkg->delay_start);
 
+	/* negative use_delay means no scaling, see blkcg_set_delay() */
+	if (atomic_read(&blkg->use_delay) < 0)
+		return;
+
 	/*
 	 * We only want to scale down every second. The idea here is that we
 	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
@@ -1717,6 +1721,8 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
  */
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
 {
+	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
+		return;
 	blkcg_scale_delay(blkg, now);
 	atomic64_add(delta, &blkg->delay_nsec);
 }
block/blk-iocost.c
@@ -1209,14 +1209,14 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
 	u64 vtime = atomic64_read(&iocg->vtime);
 	u64 vmargin = ioc->margin_us * now->vrate;
 	u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
-	u64 expires, oexpires;
+	u64 delta_ns, expires, oexpires;
 	u32 hw_inuse;
 
 	/* debt-adjust vtime */
@@ -1233,15 +1233,10 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 		return false;
 
 	/* use delay */
-	if (cost) {
-		u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
-						 now->vrate);
-		blkcg_add_delay(blkg, now->now_ns, cost_ns);
-	}
-	blkcg_use_delay(blkg);
-
-	expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
-						   now->vrate) * NSEC_PER_USEC;
+	delta_ns = DIV64_U64_ROUND_UP(vtime - now->vnow,
+				      now->vrate) * NSEC_PER_USEC;
+	blkcg_set_delay(blkg, delta_ns);
+	expires = now->now_ns + delta_ns;
 
 	/* if already active and close enough, don't bother */
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
@@ -1260,7 +1255,7 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
 	struct ioc_now now;
 
 	ioc_now(iocg->ioc, &now);
-	iocg_kick_delay(iocg, &now, 0);
+	iocg_kick_delay(iocg, &now);
 
 	return HRTIMER_NORESTART;
 }
@@ -1378,7 +1373,7 @@ static void ioc_timer_fn(struct timer_list *timer)
 		    atomic64_read(&iocg->abs_vdebt)) {
 			/* might be oversleeping vtime / hweight changes, kick */
 			iocg_kick_waitq(iocg, &now);
-			iocg_kick_delay(iocg, &now, 0);
+			iocg_kick_delay(iocg, &now);
 		} else if (iocg_is_idle(iocg)) {
 			/* no waiter and idle, deactivate */
 			iocg->last_inuse = iocg->inuse;
@@ -1737,7 +1732,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		if (iocg_kick_delay(iocg, &now, cost))
+		if (iocg_kick_delay(iocg, &now))
 			blkcg_schedule_throttle(rqos->q,
 					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
include/linux/blk-cgroup.h
@@ -631,6 +631,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 static inline void blkcg_use_delay(struct blkcg_gq *blkg)
 {
+	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
+		return;
 	if (atomic_add_return(1, &blkg->use_delay) == 1)
 		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
 }
@@ -639,6 +641,8 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
 {
 	int old = atomic_read(&blkg->use_delay);
 
+	if (WARN_ON_ONCE(old < 0))
+		return 0;
 	if (old == 0)
 		return 0;
@@ -663,20 +667,39 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
 	return 1;
 }
 
+/**
+ * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
+ * @blkg: target blkg
+ * @delay: delay duration in nsecs
+ *
+ * When enabled with this function, the delay is not decayed and must be
+ * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
+ * blkcg_[un]use_delay() and blkcg_add_delay() usages.
+ */
+static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
+{
+	int old = atomic_read(&blkg->use_delay);
+
+	/* We only want 1 person setting the congestion count for this blkg. */
+	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
+		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
+
+	atomic64_set(&blkg->delay_nsec, delay);
+}
+
+/**
+ * blkcg_clear_delay - Disable allocator delay mechanism
+ * @blkg: target blkg
+ *
+ * Disable use_delay mechanism. See blkcg_set_delay().
+ */
 static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 {
 	int old = atomic_read(&blkg->use_delay);
 
-	if (!old)
-		return;
 	/* We only want 1 person clearing the congestion count for this blkg. */
-	while (old) {
-		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
-		if (cur == old) {
-			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
-			break;
-		}
-		old = cur;
-	}
+	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
+		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
 }
 
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);