Commit d280bab3 authored by Bart Van Assche, committed by Jens Axboe

block: Introduce request_queue.initialize_rq_fn()

Several block drivers need to initialize the driver-private request
data after having called blk_get_request() and before .prep_rq_fn()
is called, e.g. when submitting a REQ_OP_SCSI_* request. Avoid having
to repeat that initialization code after every blk_get_request() call
by adding new callback functions to struct request_queue and to
struct blk_mq_ops.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cd6ce148
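
For illustration, the call-site pattern this commit is meant to remove. A rough sketch, assuming a passthrough caller; the mydrv_initialize_rq() helper is hypothetical and stands in for whatever driver-private setup each call site used to repeat:

	/* Before: every call site repeats the driver-private setup by hand. */
	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	mydrv_initialize_rq(rq);	/* hypothetical per-driver helper */

	/*
	 * After: blk_get_request() calls q->initialize_rq_fn (or
	 * q->mq_ops->initialize_rq_fn) on every successfully allocated
	 * request, so the call site shrinks to the allocation itself.
	 */
	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);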
block/blk-core.c
@@ -1372,12 +1372,21 @@ static struct request *blk_old_get_request(struct request_queue *q,
 struct request *blk_get_request(struct request_queue *q, unsigned int op,
 				gfp_t gfp_mask)
 {
-	if (q->mq_ops)
-		return blk_mq_alloc_request(q, op,
+	struct request *req;
+
+	if (q->mq_ops) {
+		req = blk_mq_alloc_request(q, op,
 			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
 				0 : BLK_MQ_REQ_NOWAIT);
-	else
-		return blk_old_get_request(q, op, gfp_mask);
+		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+			q->mq_ops->initialize_rq_fn(req);
+	} else {
+		req = blk_old_get_request(q, op, gfp_mask);
+		if (!IS_ERR(req) && q->initialize_rq_fn)
+			q->initialize_rq_fn(req);
+	}
+
+	return req;
 }
 EXPORT_SYMBOL(blk_get_request);
include/linux/blk-mq.h
@@ -144,6 +144,8 @@ struct blk_mq_ops {
 	init_request_fn		*init_request;
 	exit_request_fn		*exit_request;
 	reinit_request_fn	*reinit_request;
+	/* Called from inside blk_get_request() */
+	void (*initialize_rq_fn)(struct request *rq);
 
 	map_queues_fn		*map_queues;
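
As a sketch of how a blk-mq driver might wire up the new callback; the driver name, command structure, and the other callback shown are hypothetical, and the real conversions (e.g. for SCSI) landed in separate patches:

	#include <linux/blk-mq.h>

	/* Hypothetical driver; unrelated blk_mq_ops callbacks omitted. */
	static void mydrv_initialize_rq(struct request *rq)
	{
		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

		/* Initialize driver-private request data at allocation time. */
		memset(cmd, 0, sizeof(*cmd));
	}

	static const struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq		= mydrv_queue_rq,
		/* Runs from blk_get_request() after a successful allocation. */
		.initialize_rq_fn	= mydrv_initialize_rq,
	};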
include/linux/blkdev.h
@@ -410,8 +410,12 @@ struct request_queue {
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 	lld_busy_fn		*lld_busy_fn;
+	/* Called just after a request is allocated */
 	init_rq_fn		*init_rq_fn;
+	/* Called just before a request is freed */
 	exit_rq_fn		*exit_rq_fn;
+	/* Called from inside blk_get_request() */
+	void (*initialize_rq_fn)(struct request *rq);
 
 	const struct blk_mq_ops	*mq_ops;
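
A legacy (non-mq) driver would instead set the hook directly on its request_queue after creating it; again a sketch with made-up names:

	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
	if (!q)
		return -ENOMEM;
	/* Called from blk_get_request() for every request handed out. */
	q->initialize_rq_fn = mydrv_initialize_rq;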