Commit 29e2b09a authored by Tejun Heo, committed by Jens Axboe

block: collapse blk_alloc_request() into get_request()

Allocation failure handling in get_request() is about to be updated.
To ease the update, collapse blk_alloc_request() into get_request().

This patch doesn't introduce any functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f9fcc2d3
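
A note for readers skimming the diff below: the stated motivation is to ease an upcoming change to allocation failure handling. While the helper existed, get_request() saw only a NULL return and could not tell whether the mempool allocation or the elevator-private setup had failed; with both steps inlined, each failure site jumps to fail_alloc on its own and can later be handled individually. The following is a minimal, hypothetical userspace sketch of that refactoring pattern, not kernel code; the names get_request, setup_private, and need_private are illustrative stand-ins, and the real change is in the diff for block/blk-core.c below.

/*
 * Hypothetical userspace sketch of the pattern this patch applies:
 * collapse a two-step allocation helper into its caller so that every
 * failure site reaches the caller's unwind label directly.
 * Illustrative names only; the real code is in block/blk-core.c.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
        int elv_private;
};

/* Stand-in for elevator setup: returns 0 on success, -1 on failure. */
static int setup_private(struct request *rq)
{
        rq->elv_private = 1;
        return 0;
}

/* After the collapse: allocation and setup sit inline in the caller. */
static struct request *get_request(int need_private)
{
        struct request *rq;

        /* allocate and init request */
        rq = malloc(sizeof(*rq));
        if (!rq)
                goto fail_alloc;        /* the allocation itself failed */
        rq->elv_private = 0;

        if (need_private && setup_private(rq) < 0) {
                free(rq);
                goto fail_alloc;        /* setup failed, rq already freed */
        }
        return rq;

fail_alloc:
        /* one unwind label today, but the two goto sites can diverge later */
        return NULL;
}

int main(void)
{
        struct request *rq = get_request(1);

        printf("request: %s\n", rq ? "allocated" : "failed");
        free(rq);
        return 0;
}
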
@@ -719,33 +719,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	mempool_free(rq, q->rq.rq_pool);
 }
 
-static struct request *
-blk_alloc_request(struct request_queue *q, struct bio *bio, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -968,10 +941,25 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 			goto fail_alloc;
 	}
 
-	rq = blk_alloc_request(q, bio, icq, rw_flags, gfp_mask);
-	if (unlikely(!rq))
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
 		goto fail_alloc;
 
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	if (rw_flags & REQ_ELVPRIV) {
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+			mempool_free(rq, q->rq.rq_pool);
+			goto fail_alloc;
+		}
+		/* @rq->elv.icq holds on to io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
+	}
+
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
...