Commit 07319678 authored by Bart Van Assche's avatar Bart Van Assche Committed by Jens Axboe

blk-mq: Reduce blk_mq_hw_ctx size

Since the srcu structure is rather large (184 bytes on an x86-64
system with kernel debugging disabled), only allocate it if needed.
Reported-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent efbeccdb
...@@ -172,7 +172,7 @@ void blk_mq_quiesce_queue(struct request_queue *q) ...@@ -172,7 +172,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING) if (hctx->flags & BLK_MQ_F_BLOCKING)
synchronize_srcu(&hctx->queue_rq_srcu); synchronize_srcu(hctx->queue_rq_srcu);
else else
rcu = true; rcu = true;
} }
...@@ -1094,9 +1094,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) ...@@ -1094,9 +1094,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
} else { } else {
might_sleep(); might_sleep();
srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
blk_mq_sched_dispatch_requests(hctx); blk_mq_sched_dispatch_requests(hctx);
srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
} }
} }
...@@ -1505,9 +1505,9 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ...@@ -1505,9 +1505,9 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
might_sleep(); might_sleep();
srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
__blk_mq_try_issue_directly(hctx, rq, cookie, true); __blk_mq_try_issue_directly(hctx, rq, cookie, true);
srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
} }
} }
...@@ -1853,7 +1853,7 @@ static void blk_mq_exit_hctx(struct request_queue *q, ...@@ -1853,7 +1853,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
set->ops->exit_hctx(hctx, hctx_idx); set->ops->exit_hctx(hctx, hctx_idx);
if (hctx->flags & BLK_MQ_F_BLOCKING) if (hctx->flags & BLK_MQ_F_BLOCKING)
cleanup_srcu_struct(&hctx->queue_rq_srcu); cleanup_srcu_struct(hctx->queue_rq_srcu);
blk_mq_remove_cpuhp(hctx); blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq); blk_free_flush_queue(hctx->fq);
...@@ -1926,7 +1926,7 @@ static int blk_mq_init_hctx(struct request_queue *q, ...@@ -1926,7 +1926,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
goto free_fq; goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING) if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(&hctx->queue_rq_srcu); init_srcu_struct(hctx->queue_rq_srcu);
blk_mq_debugfs_register_hctx(q, hctx); blk_mq_debugfs_register_hctx(q, hctx);
...@@ -2201,6 +2201,20 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) ...@@ -2201,6 +2201,20 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
} }
EXPORT_SYMBOL(blk_mq_init_queue); EXPORT_SYMBOL(blk_mq_init_queue);
/*
 * Number of bytes to allocate for one hardware queue context.
 *
 * The srcu structure for BLK_MQ_F_BLOCKING queues lives in the trailing
 * queue_rq_srcu[] member of struct blk_mq_hw_ctx, so it only costs memory
 * for queues that actually need it.
 */
static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	/* queue_rq_srcu must be the last, alignment-padded-out member. */
	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	return sizeof(struct blk_mq_hw_ctx) +
	       ((tag_set->flags & BLK_MQ_F_BLOCKING) ?
		sizeof(struct srcu_struct) : 0);
}
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q) struct request_queue *q)
{ {
...@@ -2215,7 +2229,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, ...@@ -2215,7 +2229,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
continue; continue;
node = blk_mq_hw_queue_to_node(q->mq_map, i); node = blk_mq_hw_queue_to_node(q->mq_map, i);
hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
GFP_KERNEL, node); GFP_KERNEL, node);
if (!hctxs[i]) if (!hctxs[i])
break; break;
......
...@@ -39,8 +39,6 @@ struct blk_mq_hw_ctx { ...@@ -39,8 +39,6 @@ struct blk_mq_hw_ctx {
struct blk_mq_tags *tags; struct blk_mq_tags *tags;
struct blk_mq_tags *sched_tags; struct blk_mq_tags *sched_tags;
struct srcu_struct queue_rq_srcu;
unsigned long queued; unsigned long queued;
unsigned long run; unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7 #define BLK_MQ_MAX_DISPATCH_ORDER 7
...@@ -62,6 +60,9 @@ struct blk_mq_hw_ctx { ...@@ -62,6 +60,9 @@ struct blk_mq_hw_ctx {
struct dentry *debugfs_dir; struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir; struct dentry *sched_debugfs_dir;
#endif #endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
struct srcu_struct queue_rq_srcu[0];
}; };
struct blk_mq_tag_set { struct blk_mq_tag_set {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment