Commit ba91c849 authored by Christoph Hellwig, committed by Jens Axboe

blk-rq-qos: store a gendisk instead of request_queue in struct rq_qos

This is what about half of the users already want, and it's only going to
grow more.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20230203150400.3199230-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3963d84d
...@@ -669,7 +669,7 @@ static struct ioc *q_to_ioc(struct request_queue *q) ...@@ -669,7 +669,7 @@ static struct ioc *q_to_ioc(struct request_queue *q)
static const char __maybe_unused *ioc_name(struct ioc *ioc) static const char __maybe_unused *ioc_name(struct ioc *ioc)
{ {
struct gendisk *disk = ioc->rqos.q->disk; struct gendisk *disk = ioc->rqos.disk;
if (!disk) if (!disk)
return "<unknown>"; return "<unknown>";
...@@ -808,11 +808,11 @@ static int ioc_autop_idx(struct ioc *ioc) ...@@ -808,11 +808,11 @@ static int ioc_autop_idx(struct ioc *ioc)
u64 now_ns; u64 now_ns;
/* rotational? */ /* rotational? */
if (!blk_queue_nonrot(ioc->rqos.q)) if (!blk_queue_nonrot(ioc->rqos.disk->queue))
return AUTOP_HDD; return AUTOP_HDD;
/* handle SATA SSDs w/ broken NCQ */ /* handle SATA SSDs w/ broken NCQ */
if (blk_queue_depth(ioc->rqos.q) == 1) if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
return AUTOP_SSD_QD1; return AUTOP_SSD_QD1;
/* use one of the normal ssd sets */ /* use one of the normal ssd sets */
...@@ -2649,7 +2649,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) ...@@ -2649,7 +2649,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
if (use_debt) { if (use_debt) {
iocg_incur_debt(iocg, abs_cost, &now); iocg_incur_debt(iocg, abs_cost, &now);
if (iocg_kick_delay(iocg, &now)) if (iocg_kick_delay(iocg, &now))
blkcg_schedule_throttle(rqos->q->disk, blkcg_schedule_throttle(rqos->disk,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP); (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
iocg_unlock(iocg, ioc_locked, &flags); iocg_unlock(iocg, ioc_locked, &flags);
return; return;
...@@ -2750,7 +2750,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, ...@@ -2750,7 +2750,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
if (likely(!list_empty(&iocg->active_list))) { if (likely(!list_empty(&iocg->active_list))) {
iocg_incur_debt(iocg, abs_cost, &now); iocg_incur_debt(iocg, abs_cost, &now);
if (iocg_kick_delay(iocg, &now)) if (iocg_kick_delay(iocg, &now))
blkcg_schedule_throttle(rqos->q->disk, blkcg_schedule_throttle(rqos->disk,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP); (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
} else { } else {
iocg_commit_bio(iocg, bio, abs_cost, cost); iocg_commit_bio(iocg, bio, abs_cost, cost);
...@@ -2821,7 +2821,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos) ...@@ -2821,7 +2821,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
{ {
struct ioc *ioc = rqos_to_ioc(rqos); struct ioc *ioc = rqos_to_ioc(rqos);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost); blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iocost);
spin_lock_irq(&ioc->lock); spin_lock_irq(&ioc->lock);
ioc->running = IOC_STOP; ioc->running = IOC_STOP;
......
...@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos, ...@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay); unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
if (use_delay) if (use_delay)
blkcg_schedule_throttle(rqos->q->disk, use_memdelay); blkcg_schedule_throttle(rqos->disk, use_memdelay);
/* /*
* To avoid priority inversions we want to just take a slot if we are * To avoid priority inversions we want to just take a slot if we are
...@@ -330,7 +330,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat, ...@@ -330,7 +330,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
struct child_latency_info *lat_info, struct child_latency_info *lat_info,
bool up) bool up)
{ {
unsigned long qd = blkiolat->rqos.q->nr_requests; unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
unsigned long scale = scale_amount(qd, up); unsigned long scale = scale_amount(qd, up);
unsigned long old = atomic_read(&lat_info->scale_cookie); unsigned long old = atomic_read(&lat_info->scale_cookie);
unsigned long max_scale = qd << 1; unsigned long max_scale = qd << 1;
...@@ -372,7 +372,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat, ...@@ -372,7 +372,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
*/ */
static void scale_change(struct iolatency_grp *iolat, bool up) static void scale_change(struct iolatency_grp *iolat, bool up)
{ {
unsigned long qd = iolat->blkiolat->rqos.q->nr_requests; unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
unsigned long scale = scale_amount(qd, up); unsigned long scale = scale_amount(qd, up);
unsigned long old = iolat->max_depth; unsigned long old = iolat->max_depth;
...@@ -646,7 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos) ...@@ -646,7 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
timer_shutdown_sync(&blkiolat->timer); timer_shutdown_sync(&blkiolat->timer);
flush_work(&blkiolat->enable_work); flush_work(&blkiolat->enable_work);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency); blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iolatency);
kfree(blkiolat); kfree(blkiolat);
} }
...@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) ...@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
rcu_read_lock(); rcu_read_lock();
blkg_for_each_descendant_pre(blkg, pos_css, blkg_for_each_descendant_pre(blkg, pos_css,
blkiolat->rqos.q->root_blkg) { blkiolat->rqos.disk->queue->root_blkg) {
struct iolatency_grp *iolat; struct iolatency_grp *iolat;
struct child_latency_info *lat_info; struct child_latency_info *lat_info;
unsigned long flags; unsigned long flags;
...@@ -749,9 +749,9 @@ static void blkiolatency_enable_work_fn(struct work_struct *work) ...@@ -749,9 +749,9 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
*/ */
enabled = atomic_read(&blkiolat->enable_cnt); enabled = atomic_read(&blkiolat->enable_cnt);
if (enabled != blkiolat->enabled) { if (enabled != blkiolat->enabled) {
blk_mq_freeze_queue(blkiolat->rqos.q); blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
blkiolat->enabled = enabled; blkiolat->enabled = enabled;
blk_mq_unfreeze_queue(blkiolat->rqos.q); blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
} }
} }
......
...@@ -813,9 +813,9 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id) ...@@ -813,9 +813,9 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{ {
lockdep_assert_held(&rqos->q->debugfs_mutex); lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);
if (!rqos->q->debugfs_dir) if (!rqos->disk->queue->debugfs_dir)
return; return;
debugfs_remove_recursive(rqos->debugfs_dir); debugfs_remove_recursive(rqos->debugfs_dir);
rqos->debugfs_dir = NULL; rqos->debugfs_dir = NULL;
...@@ -823,7 +823,7 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) ...@@ -823,7 +823,7 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{ {
struct request_queue *q = rqos->q; struct request_queue *q = rqos->disk->queue;
const char *dir_name = rq_qos_id_to_name(rqos->id); const char *dir_name = rq_qos_id_to_name(rqos->id);
lockdep_assert_held(&q->debugfs_mutex); lockdep_assert_held(&q->debugfs_mutex);
...@@ -835,9 +835,7 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) ...@@ -835,9 +835,7 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
q->rqos_debugfs_dir = debugfs_create_dir("rqos", q->rqos_debugfs_dir = debugfs_create_dir("rqos",
q->debugfs_dir); q->debugfs_dir);
rqos->debugfs_dir = debugfs_create_dir(dir_name, rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
rqos->q->rqos_debugfs_dir);
debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs); debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
} }
......
...@@ -300,7 +300,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id, ...@@ -300,7 +300,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
{ {
struct request_queue *q = disk->queue; struct request_queue *q = disk->queue;
rqos->q = q; rqos->disk = disk;
rqos->id = id; rqos->id = id;
rqos->ops = ops; rqos->ops = ops;
...@@ -337,7 +337,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id, ...@@ -337,7 +337,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
void rq_qos_del(struct rq_qos *rqos) void rq_qos_del(struct rq_qos *rqos)
{ {
struct request_queue *q = rqos->q; struct request_queue *q = rqos->disk->queue;
struct rq_qos **cur; struct rq_qos **cur;
/* /*
......
...@@ -26,7 +26,7 @@ struct rq_wait { ...@@ -26,7 +26,7 @@ struct rq_wait {
struct rq_qos { struct rq_qos {
const struct rq_qos_ops *ops; const struct rq_qos_ops *ops;
struct request_queue *q; struct gendisk *disk;
enum rq_qos_id id; enum rq_qos_id id;
struct rq_qos *next; struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS #ifdef CONFIG_BLK_DEBUG_FS
......
...@@ -165,7 +165,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var) ...@@ -165,7 +165,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/ */
static bool wb_recent_wait(struct rq_wb *rwb) static bool wb_recent_wait(struct rq_wb *rwb)
{ {
struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb; struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
return time_before(jiffies, wb->dirty_sleep + HZ); return time_before(jiffies, wb->dirty_sleep + HZ);
} }
...@@ -312,7 +312,7 @@ enum { ...@@ -312,7 +312,7 @@ enum {
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat) static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{ {
struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi; struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth; struct rq_depth *rqd = &rwb->rq_depth;
u64 thislat; u64 thislat;
...@@ -365,7 +365,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat) ...@@ -365,7 +365,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
static void rwb_trace_step(struct rq_wb *rwb, const char *msg) static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{ {
struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi; struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
struct rq_depth *rqd = &rwb->rq_depth; struct rq_depth *rqd = &rwb->rq_depth;
trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec, trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
...@@ -435,13 +435,12 @@ static void wb_timer_fn(struct blk_stat_callback *cb) ...@@ -435,13 +435,12 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
unsigned int inflight = wbt_inflight(rwb); unsigned int inflight = wbt_inflight(rwb);
int status; int status;
if (!rwb->rqos.q->disk) if (!rwb->rqos.disk)
return; return;
status = latency_exceeded(rwb, cb->stat); status = latency_exceeded(rwb, cb->stat);
trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step, trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);
inflight);
/* /*
* If we exceeded the latency target, step down. If we did not, * If we exceeded the latency target, step down. If we did not,
...@@ -779,16 +778,15 @@ static int wbt_data_dir(const struct request *rq) ...@@ -779,16 +778,15 @@ static int wbt_data_dir(const struct request *rq)
static void wbt_queue_depth_changed(struct rq_qos *rqos) static void wbt_queue_depth_changed(struct rq_qos *rqos)
{ {
RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q); RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
wbt_update_limits(RQWB(rqos)); wbt_update_limits(RQWB(rqos));
} }
static void wbt_exit(struct rq_qos *rqos) static void wbt_exit(struct rq_qos *rqos)
{ {
struct rq_wb *rwb = RQWB(rqos); struct rq_wb *rwb = RQWB(rqos);
struct request_queue *q = rqos->q;
blk_stat_remove_callback(q, rwb->cb); blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
blk_stat_free_callback(rwb->cb); blk_stat_free_callback(rwb->cb);
kfree(rwb); kfree(rwb);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment