Commit 4a69f325 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

blk-cgroup: cleanup the blkg_lookup family of functions

Add a fully inlined blkg_lookup as the extra two checks aren't going
to generate a lot more code vs the call to the slowpath routine, and
open code the hint update in the two callers that care.
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarAndreas Herrmann <aherrmann@suse.de>
Acked-by: default avatarTejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-5-hch@lst.de
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 79fcc5be
...@@ -263,29 +263,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, ...@@ -263,29 +263,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
return NULL; return NULL;
} }
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, static void blkg_update_hint(struct blkcg *blkcg, struct blkcg_gq *blkg)
struct request_queue *q, bool update_hint)
{ {
struct blkcg_gq *blkg; lockdep_assert_held(&blkg->q->queue_lock);
/* if (blkcg != &blkcg_root && blkg != rcu_dereference(blkcg->blkg_hint))
* Hint didn't match. Look up from the radix tree. Note that the
* hint can only be updated under queue_lock as otherwise @blkg
* could have already been removed from blkg_tree. The caller is
* responsible for grabbing queue_lock if @update_hint.
*/
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg); rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
}
return NULL;
} }
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
/* /*
* If @new_blkg is %NULL, this function tries to allocate a new one as * If @new_blkg is %NULL, this function tries to allocate a new one as
...@@ -397,9 +381,11 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, ...@@ -397,9 +381,11 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
return blkg; return blkg;
spin_lock_irqsave(&q->queue_lock, flags); spin_lock_irqsave(&q->queue_lock, flags);
blkg = __blkg_lookup(blkcg, q, true); blkg = blkg_lookup(blkcg, q);
if (blkg) if (blkg) {
blkg_update_hint(blkcg, blkg);
goto found; goto found;
}
/* /*
* Create blkgs walking down from blkcg_root to @blkcg, so that all * Create blkgs walking down from blkcg_root to @blkcg, so that all
...@@ -621,12 +607,18 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg, ...@@ -621,12 +607,18 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
const struct blkcg_policy *pol, const struct blkcg_policy *pol,
struct request_queue *q) struct request_queue *q)
{ {
struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held()); WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(&q->queue_lock); lockdep_assert_held(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol)) if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
return __blkg_lookup(blkcg, q, true /* update_hint */);
blkg = blkg_lookup(blkcg, q);
if (blkg)
blkg_update_hint(blkcg, blkg);
return blkg;
} }
/** /**
......
...@@ -178,8 +178,6 @@ struct blkcg_policy { ...@@ -178,8 +178,6 @@ struct blkcg_policy {
extern struct blkcg blkcg_root; extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats; extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q); int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q); void blkcg_exit_queue(struct request_queue *q);
...@@ -227,22 +225,21 @@ static inline bool bio_issue_as_root_blkg(struct bio *bio) ...@@ -227,22 +225,21 @@ static inline bool bio_issue_as_root_blkg(struct bio *bio)
} }
/** /**
* __blkg_lookup - internal version of blkg_lookup() * blkg_lookup - lookup blkg for the specified blkcg - q pair
* @blkcg: blkcg of interest * @blkcg: blkcg of interest
* @q: request_queue of interest * @q: request_queue of interest
* @update_hint: whether to update lookup hint with the result or not
* *
* This is internal version and shouldn't be used by policy * Lookup blkg for the @blkcg - @q pair.
* implementations. Looks up blkgs for the @blkcg - @q pair regardless of
* @q's bypass state. If @update_hint is %true, the caller should be * Must be called in a RCU critical section.
* holding @q->queue_lock and lookup hint is updated on success.
*/ */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q, struct request_queue *q)
bool update_hint)
{ {
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
if (blkcg == &blkcg_root) if (blkcg == &blkcg_root)
return q->root_blkg; return q->root_blkg;
...@@ -250,22 +247,10 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, ...@@ -250,22 +247,10 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
if (blkg && blkg->q == q) if (blkg && blkg->q == q)
return blkg; return blkg;
return blkg_lookup_slowpath(blkcg, q, update_hint); blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
} if (blkg && blkg->q != q)
blkg = NULL;
/** return blkg;
* blkg_lookup - lookup blkg for the specified blkcg - q pair
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair. This function should be called
* under RCU read lock.
*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return __blkg_lookup(blkcg, q, false);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment