Commit 7c6c5b7c authored by Ming Lei, committed by Jens Axboe

blk-mq: split blk_mq_alloc_and_init_hctx into two parts

Split blk_mq_alloc_and_init_hctx() into two parts: blk_mq_alloc_hctx()
allocates all hctx resources, and blk_mq_init_hctx() initializes the
hctx and serves as the counterpart of blk_mq_exit_hctx().

Cc: Dongli Zhang <dongli.zhang@oracle.com>
Cc: James Smart <james.smart@broadcom.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: linux-scsi@vger.kernel.org
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James E.J. Bottomley <jejb@linux.vnet.ibm.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c7e2d94b
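
For orientation before reading the diff, the sketch below condenses what the patch ends up with: blk_mq_alloc_hctx() becomes a pure resource-allocation step, blk_mq_init_hctx() becomes the initialization step that blk_mq_exit_hctx() later undoes, and blk_mq_alloc_and_init_hctx() simply composes the two. This is an illustrative summary of the hunks that follow, not verbatim kernel code.

/*
 * Condensed sketch of the resulting structure (illustrative only; see the
 * diff below for the exact code).
 */
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
		struct blk_mq_tag_set *set, struct request_queue *q,
		int hctx_idx, int node)
{
	/* step 1: allocate hctx, cpumask, ctxs array, ctx_map and flush queue */
	struct blk_mq_hw_ctx *hctx = blk_mq_alloc_hctx(q, set, node);

	if (!hctx)
		return NULL;

	/*
	 * step 2: register the cpuhp notifier, set up tags, call the driver's
	 * ->init_hctx() and init the flush request; blk_mq_exit_hctx() is the
	 * matching teardown.
	 */
	if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
		/* dropping the kobject releases what step 1 allocated */
		kobject_put(&hctx->kobj);
		return NULL;
	}

	return hctx;
}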
@@ -2285,15 +2285,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
 	}
 }
 
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
+			   __alignof__(struct blk_mq_hw_ctx)) !=
+		     sizeof(struct blk_mq_hw_ctx));
+
+	if (tag_set->flags & BLK_MQ_F_BLOCKING)
+		hw_ctx_size += sizeof(struct srcu_struct);
+
+	return hw_ctx_size;
+}
+
 static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
-	int node;
+	hctx->queue_num = hctx_idx;
 
-	node = hctx->numa_node;
+	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+
+	hctx->tags = set->tags[hctx_idx];
+
+	if (set->ops->init_hctx &&
+	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+		goto unregister_cpu_notifier;
+
+	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
+				hctx->numa_node))
+		goto exit_hctx;
+	return 0;
+
+ exit_hctx:
+	if (set->ops->exit_hctx)
+		set->ops->exit_hctx(hctx, hctx_idx);
+ unregister_cpu_notifier:
+	blk_mq_remove_cpuhp(hctx);
+	return -1;
+}
+
+static struct blk_mq_hw_ctx *
+blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
+		int node)
+{
+	struct blk_mq_hw_ctx *hctx;
+	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+	hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
+	if (!hctx)
+		goto fail_alloc_hctx;
+
+	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
+		goto free_hctx;
+
+	atomic_set(&hctx->nr_active, 0);
 	if (node == NUMA_NO_NODE)
-		node = hctx->numa_node = set->numa_node;
+		node = set->numa_node;
+	hctx->numa_node = node;
 
 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	spin_lock_init(&hctx->lock);
@@ -2301,58 +2351,45 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
-	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
-	hctx->tags = set->tags[hctx_idx];
-
 	/*
 	 * Allocate space for all possible cpus to avoid allocation at
 	 * runtime
 	 */
 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
-			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
+			gfp, node);
 	if (!hctx->ctxs)
-		goto unregister_cpu_notifier;
+		goto free_cpumask;
 
 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
-				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
+				gfp, node))
 		goto free_ctxs;
-
 	hctx->nr_ctx = 0;
 
 	spin_lock_init(&hctx->dispatch_wait_lock);
 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
 
-	if (set->ops->init_hctx &&
-	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
-		goto free_bitmap;
-
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
-			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+			gfp);
 	if (!hctx->fq)
-		goto exit_hctx;
-
-	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
-		goto free_fq;
+		goto free_bitmap;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(hctx->srcu);
+	blk_mq_hctx_kobj_init(hctx);
 
-	return 0;
+	return hctx;
 
- free_fq:
-	blk_free_flush_queue(hctx->fq);
- exit_hctx:
-	if (set->ops->exit_hctx)
-		set->ops->exit_hctx(hctx, hctx_idx);
  free_bitmap:
 	sbitmap_free(&hctx->ctx_map);
  free_ctxs:
 	kfree(hctx->ctxs);
- unregister_cpu_notifier:
-	blk_mq_remove_cpuhp(hctx);
-	return -1;
+ free_cpumask:
+	free_cpumask_var(hctx->cpumask);
+ free_hctx:
+	kfree(hctx);
+ fail_alloc_hctx:
+	return NULL;
 }
 
 static void blk_mq_init_cpu_queues(struct request_queue *q,
@@ -2698,51 +2735,25 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
 }
 EXPORT_SYMBOL(blk_mq_init_sq_queue);
 
-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
-{
-	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
-
-	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
-			   __alignof__(struct blk_mq_hw_ctx)) !=
-		     sizeof(struct blk_mq_hw_ctx));
-
-	if (tag_set->flags & BLK_MQ_F_BLOCKING)
-		hw_ctx_size += sizeof(struct srcu_struct);
-
-	return hw_ctx_size;
-}
-
 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		struct blk_mq_tag_set *set, struct request_queue *q,
 		int hctx_idx, int node)
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
-			GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-			node);
+	hctx = blk_mq_alloc_hctx(q, set, node);
 	if (!hctx)
-		return NULL;
-
-	if (!zalloc_cpumask_var_node(&hctx->cpumask,
-				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
-				node)) {
-		kfree(hctx);
-		return NULL;
-	}
+		goto fail;
 
-	atomic_set(&hctx->nr_active, 0);
-	hctx->numa_node = node;
-	hctx->queue_num = hctx_idx;
+	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
+		goto free_hctx;
 
-	if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
-		free_cpumask_var(hctx->cpumask);
-		kfree(hctx);
-		return NULL;
-	}
-	blk_mq_hctx_kobj_init(hctx);
+	return hctx;
 
-	return hctx;
+ free_hctx:
+	kobject_put(&hctx->kobj);
+ fail:
+	return NULL;
 }
 
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
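
For completeness, the sole caller of blk_mq_alloc_and_init_hctx() is blk_mq_realloc_hw_ctxs(), which the last context line above belongs to. Its relevant loop looks roughly like the sketch below; this is a simplified, abbreviated rendering of kernels from this era, not part of the patch, with error handling trimmed.

/*
 * Rough sketch of the caller, blk_mq_realloc_hw_ctxs() (simplified, not
 * verbatim; unrelated bookkeeping omitted).
 */
for (i = 0; i < set->nr_hw_queues; i++) {
	struct blk_mq_hw_ctx *hctx, *old = q->queue_hw_ctx[i];
	int node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);

	/* an existing hctx already on the right NUMA node is kept as-is */
	if (old && old->numa_node == node)
		continue;

	hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
	if (hctx) {
		if (old)
			blk_mq_exit_hctx(q, set, old, i);
		q->queue_hw_ctx[i] = hctx;
	} else {
		/* allocation failed: keep what already exists and stop growing */
		break;
	}
}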