Commit 51db1c37 authored by Ming Lei, committed by Jens Axboe

blk-mq: Rename BLK_MQ_F_TAG_SHARED as BLK_MQ_F_TAG_QUEUE_SHARED

BLK_MQ_F_TAG_SHARED actually means that the tag set is shared among request
queues, all of which should belong to LUNs attached to the same HBA.

So rename it to make that point explicit.
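
For illustration, a minimal sketch of how a tag set becomes queue-shared
(assumed driver code, not part of this commit; my_hba_mq_ops and
my_hba_add_luns are hypothetical). A driver normally does not set the
flag itself; blk-mq sets it when a second request queue is attached to
the same set:

	#include <linux/blk-mq.h>

	/* Hypothetical ops table; .queue_rq etc. omitted for brevity. */
	static const struct blk_mq_ops my_hba_mq_ops;

	static struct blk_mq_tag_set hba_set = {
		.ops		= &my_hba_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.numa_node	= NUMA_NO_NODE,
		.flags		= BLK_MQ_F_SHOULD_MERGE,
	};

	static int my_hba_add_luns(void)
	{
		struct request_queue *q0, *q1;
		int err;

		err = blk_mq_alloc_tag_set(&hba_set);
		if (err)
			return err;

		q0 = blk_mq_init_queue(&hba_set);	/* one user: tags stay unshared */
		if (IS_ERR(q0))
			return PTR_ERR(q0);

		/* 1 -> 2 queues: blk_mq_add_queue_tag_set() now sets
		 * BLK_MQ_F_TAG_QUEUE_SHARED on the set and on every hctx. */
		q1 = blk_mq_init_queue(&hba_set);
		if (IS_ERR(q1))
			return PTR_ERR(q1);
		return 0;
	}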

[jpg: rebase a few times, add rnbd-clt.c change]
Suggested-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent de09077c
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -240,7 +240,7 @@ static const char *const alloc_policy_name[] = {
 #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
 static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(SHOULD_MERGE),
-	HCTX_FLAG_NAME(TAG_SHARED),
+	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
 	HCTX_FLAG_NAME(BLOCKING),
 	HCTX_FLAG_NAME(NO_SCHED),
 	HCTX_FLAG_NAME(STACKING),
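
With this table updated, debugfs reports the flag under its new name: for
a shared set, /sys/kernel/debug/block/<disk>/hctx0/flags now lists
TAG_QUEUE_SHARED where it previously listed TAG_SHARED.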
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -56,7 +56,7 @@ extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
 		return false;
 
 	return __blk_mq_tag_busy(hctx);
@@ -64,7 +64,7 @@ static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
 		return;
 
 	__blk_mq_tag_idle(hctx);
@@ -79,7 +79,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 {
 	unsigned int depth, users;
 
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
 		return true;
 	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 		return true;
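
For context, this is the fairness logic that the renamed check guards: a
sketch of the remainder of hctx_may_queue() as it reads in the kernel
this patch applies to (unchanged by the patch itself, shown only to
explain why the flag matters):

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;
	/*
	 * Allow at least 4 tags per queue; e.g. 64 tags shared by 4
	 * active queues caps each queue at 16 in-flight driver tags.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;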
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1124,7 +1124,7 @@ static bool blk_mq_get_driver_tag(struct request *rq)
 	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
 		return false;
 
-	if ((hctx->flags & BLK_MQ_F_TAG_SHARED) &&
+	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
 	    !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
 		rq->rq_flags |= RQF_MQ_INFLIGHT;
 		atomic_inc(&hctx->nr_active);
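
RQF_MQ_INFLIGHT ensures each request bumps hctx->nr_active only once;
nr_active is the per-queue in-flight count that hctx_may_queue() weighs
against the fair-share depth sketched above.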
@@ -1168,7 +1168,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	wait_queue_entry_t *wait;
 	bool ret;
 
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
+	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
 
 		/*
@@ -1420,7 +1420,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 		bool needs_restart;
 		/* For non-shared tags, the RESTART check will suffice */
 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
-			(hctx->flags & BLK_MQ_F_TAG_SHARED);
+			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
 		bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
 		blk_mq_release_budgets(q, nr_budgets);
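
With queue-shared tags, a freed tag may have been held by a request from
another queue in the set, so the plain RESTART marking is not enough:
blk_mq_mark_tag_wait() instead parks the hctx on the tag waitqueue, and
no_tag above keeps the dispatch loop from rerunning the queue while that
wakeup is still pending.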
@@ -2668,7 +2668,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
 	hctx->queue = q;
-	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
+	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
 
 	INIT_LIST_HEAD(&hctx->hctx_list);
@@ -2885,9 +2885,9 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (shared)
-			hctx->flags |= BLK_MQ_F_TAG_SHARED;
+			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
 		else
-			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
 	}
 }
@@ -2913,7 +2913,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	list_del(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
-		set->flags &= ~BLK_MQ_F_TAG_SHARED;
+		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
 		/* update existing queue */
 		blk_mq_update_tag_set_depth(set, false);
 	}
@@ -2930,12 +2930,12 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
 	 */
 	if (!list_empty(&set->tag_list) &&
-	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
-		set->flags |= BLK_MQ_F_TAG_SHARED;
+	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
 		/* update existing queue */
 		blk_mq_update_tag_set_depth(set, true);
 	}
-	if (set->flags & BLK_MQ_F_TAG_SHARED)
+	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
 		queue_set_hctx_shared(q, true);
 
 	list_add_tail(&q->tag_set_list, &set->tag_list);
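
Note the symmetry: the set-level flag only flips on the 1 <-> 2 queue
transitions, and blk_mq_update_tag_set_depth() then freezes each queue
already in the tag list and calls queue_set_hctx_shared() on it, so every
hctx of every sharing queue agrees on the flag.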
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1174,7 +1174,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
 	tag_set->queue_depth = sess->queue_depth;
 	tag_set->numa_node = NUMA_NO_NODE;
 	tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
-			 BLK_MQ_F_TAG_SHARED;
+			 BLK_MQ_F_TAG_QUEUE_SHARED;
 	tag_set->cmd_size = sizeof(struct rnbd_iu);
 	tag_set->nr_hw_queues = num_online_cpus();
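
This is the rnbd-clt.c change mentioned in the [jpg] note above: rnbd-clt
sets the flag up front rather than waiting for the 1 -> 2 queue
transition, presumably because every device mapped over a session shares
that session's tag set.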
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -378,7 +378,7 @@ struct blk_mq_ops {
 enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
-	BLK_MQ_F_TAG_SHARED	= 1 << 1,
+	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
 	/*
 	 * Set when this device requires underlying blk-mq device for
 	 * completing IO:
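
The narrower name also leaves room for the hctx-level sharing flag
(BLK_MQ_F_TAG_HCTX_SHARED) introduced later in this series, for tag sets
that share a single sbitmap across all hardware queues.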