Commit 99e48cd6 authored by John Garry, committed by Jens Axboe

blk-mq: Add a flag for reserved requests

Add a flag for reserved requests so that drivers may know this for any
special handling.
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/1657109034-206040-3-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent deef1be1
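For context: reserved tags come from the reserved_tags portion of a driver's tag set, and a request is drawn from that pool by passing BLK_MQ_REQ_RESERVED at allocation time. A minimal sketch of such a setup; the driver names (my_mq_ops) are hypothetical, not part of this commit:

	/* Hold back 2 of 64 tags for driver-internal commands; requests
	 * allocated with BLK_MQ_REQ_RESERVED draw from this pool. */
	struct blk_mq_tag_set tag_set = {
		.ops		= &my_mq_ops,	/* hypothetical driver ops */
		.nr_hw_queues	= 1,
		.queue_depth	= 64,
		.reserved_tags	= 2,
		.numa_node	= NUMA_NO_NODE,
	};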
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -475,6 +475,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (!(data->rq_flags & RQF_ELV))
 		blk_mq_tag_busy(data->hctx);
 
+	if (data->flags & BLK_MQ_REQ_RESERVED)
+		data->rq_flags |= RQF_RESV;
+
 	/*
 	 * Try batched alloc if we want more than 1 tag.
 	 */
@@ -589,6 +592,9 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	else
 		data.rq_flags |= RQF_ELV;
 
+	if (flags & BLK_MQ_REQ_RESERVED)
+		data.rq_flags |= RQF_RESV;
+
 	ret = -EWOULDBLOCK;
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
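Both allocation paths above now mark reserved requests the same way. A sketch of the caller side, assuming a driver-internal command (the opcode choice and error handling are illustrative):

	struct request *rq;

	/* Draw a tag from the reserved pool; with this patch the
	 * returned request has RQF_RESV set in rq->rq_flags. */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);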
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,6 +57,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
 /* queue has elevator attached */
 #define RQF_ELV			((__force req_flags_t)(1 << 22))
+#define RQF_RESV		((__force req_flags_t)(1 << 23))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
@@ -825,6 +826,11 @@ static inline bool blk_mq_need_time_stamp(struct request *rq)
 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
 }
 
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+	return rq->rq_flags & RQF_RESV;
+}
+
 /*
  * Batched completions only work when there is no I/O error and no special
  * ->end_io handler.
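The new blk_mq_is_reserved_rq() helper lets a driver tell a reserved request apart from the request alone, for example in its timeout handler. A sketch of such a handler; the function name and recovery policy are hypothetical, and the bool reserved argument (still part of the ->timeout() prototype at this point in the series) is deliberately ignored in favour of the helper:

	static enum blk_eh_timer_return my_timeout(struct request *rq,
						   bool reserved)
	{
		/* Give internal (reserved) commands more time rather
		 * than escalating to error handling. */
		if (blk_mq_is_reserved_rq(rq))
			return BLK_EH_RESET_TIMER;

		return BLK_EH_DONE;
	}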