Commit 205fb5f5 authored by Bart Van Assche, committed by Christoph Hellwig

blk-mq: add blk_mq_unique_tag()

The queuecommand() callback functions in SCSI low-level drivers
need to know which hardware context has been selected by the
block layer. Since this information is not available in the
request structure, and since passing the hctx pointer directly to
the queuecommand callback function would require modifying all
SCSI LLDs, add a function to the block layer that lets LLDs
query the hardware context index.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent f1569ff1
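
For context, a minimal sketch of how a SCSI LLD's queuecommand() might consume this API. This is illustrative only and not part of the commit: example_queuecommand() and example_submit_on_hwq() are hypothetical names, while blk_mq_unique_tag() and the blk_mq_unique_tag_to_*() helpers are exactly the functions the diff below adds.

/*
 * Illustrative sketch only: a hypothetical LLD recovering the hardware
 * queue index inside queuecommand(). example_submit_on_hwq() is made up;
 * scmd->request is the block-layer request backing the SCSI command in
 * this kernel era.
 */
static int example_queuecommand(struct Scsi_Host *shost,
                                struct scsi_cmnd *scmd)
{
        u32 unique_tag = blk_mq_unique_tag(scmd->request);
        u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
        u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

        /* Submit on the hardware queue the block layer selected. */
        return example_submit_on_hwq(shost, scmd, hwq, tag);
}
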
@@ -584,6 +584,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
...
@@ -2024,6 +2024,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
        BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
...
@@ -167,6 +167,23 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);

enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
...
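
To make the encoding concrete: with BLK_MQ_UNIQUE_TAG_BITS = 16, a request on hardware queue 2 carrying per-queue tag 0x2a encodes to the unique tag 0x0002002a, and shifting and masking recover both halves. A self-contained user-space sketch of the same arithmetic, with the constants copied from the header change above:

#include <stdio.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16
#define BLK_MQ_UNIQUE_TAG_MASK ((1 << BLK_MQ_UNIQUE_TAG_BITS) - 1)

int main(void)
{
        unsigned int hwq = 2, tag = 0x2a;

        /* Encode as blk_mq_unique_tag() does. */
        unsigned int unique = (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                              (tag & BLK_MQ_UNIQUE_TAG_MASK);

        /* Decode as the blk_mq_unique_tag_to_*() helpers do. */
        printf("unique=0x%08x hwq=%u tag=0x%x\n", unique,
               unique >> BLK_MQ_UNIQUE_TAG_BITS,
               unique & BLK_MQ_UNIQUE_TAG_MASK);
        return 0;
}

This prints "unique=0x0002002a hwq=2 tag=0x2a", matching the round trip the kernel helpers perform.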