Commit e6a40b09 authored by Mike Christie, committed by Jens Axboe

block: prepare request creation/destruction code to use REQ_OPs

This patch prepares *_get_request/*_put_request and freed_request
to use separate variables for the operation and flags. In the
next patches the struct request users will be converted, as was
done for bios, so that the op and the flags are set separately.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4993b77d
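
For orientation, the calling-convention change is easiest to see in the blk_queue_bio() hunks at the end of the diff. The fragment below is an illustrative sketch distilled from those hunks (surrounding code omitted), not additional code from the commit: before this patch the data direction and the rq_flag_bits were folded into a single rw_flags bitmask; afterwards the op is passed as its own argument and rw_flags carries only rq_flag_bits.

        /* Before this patch: direction and flags share one bitmask. */
        rw_flags = bio_data_dir(bio);           /* READ or WRITE */
        if (sync)
                rw_flags |= REQ_SYNC;
        req = get_request(q, rw_flags, bio, GFP_NOIO);

        /* After this patch: the op is its own argument; rw_flags holds only rq_flag_bits. */
        rw_flags = 0;
        if (sync)
                rw_flags |= REQ_SYNC;
        req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);

Internally, __get_request() now records the pair with req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED) instead of assigning rq->cmd_flags directly, and __blk_put_request() recovers the operation with req_op(req) before calling freed_request().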
...@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync) ...@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
* A request has just been released. Account for it, update the full and * A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock. * congestion status, wake up any waiters. Called under q->queue_lock.
*/ */
static void freed_request(struct request_list *rl, unsigned int flags) static void freed_request(struct request_list *rl, int op, unsigned int flags)
{ {
struct request_queue *q = rl->q; struct request_queue *q = rl->q;
int sync = rw_is_sync(flags); int sync = rw_is_sync(op | flags);
q->nr_rqs[sync]--; q->nr_rqs[sync]--;
rl->count[sync]--; rl->count[sync]--;
...@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio) ...@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
/** /**
* __get_request - get a free request * __get_request - get a free request
* @rl: request list to allocate from * @rl: request list to allocate from
* @rw_flags: RW and SYNC flags * @op: REQ_OP_READ/REQ_OP_WRITE
* @op_flags: rq_flag_bits
* @bio: bio to allocate request for (can be %NULL) * @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask * @gfp_mask: allocation mask
* *
...@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio) ...@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held. * Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*. * Returns request pointer on success, with @q->queue_lock *not held*.
*/ */
static struct request *__get_request(struct request_list *rl, int rw_flags, static struct request *__get_request(struct request_list *rl, int op,
struct bio *bio, gfp_t gfp_mask) int op_flags, struct bio *bio,
gfp_t gfp_mask)
{ {
struct request_queue *q = rl->q; struct request_queue *q = rl->q;
struct request *rq; struct request *rq;
struct elevator_type *et = q->elevator->type; struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio); struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL; struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(rw_flags) != 0; const bool is_sync = rw_is_sync(op | op_flags) != 0;
int may_queue; int may_queue;
if (unlikely(blk_queue_dying(q))) if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
may_queue = elv_may_queue(q, rw_flags); may_queue = elv_may_queue(q, op | op_flags);
if (may_queue == ELV_MQUEUE_NO) if (may_queue == ELV_MQUEUE_NO)
goto rq_starved; goto rq_starved;
...@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
/* /*
* Decide whether the new request will be managed by elevator. If * Decide whether the new request will be managed by elevator. If
* so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
* prevent the current elevator from being destroyed until the new * prevent the current elevator from being destroyed until the new
* request is freed. This guarantees icq's won't be destroyed and * request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe. * makes creating new ones safe.
...@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* it will be created after releasing queue_lock. * it will be created after releasing queue_lock.
*/ */
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV; op_flags |= REQ_ELVPRIV;
q->nr_rqs_elvpriv++; q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc) if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q); icq = ioc_lookup_icq(ioc, q);
} }
if (blk_queue_io_stat(q)) if (blk_queue_io_stat(q))
rw_flags |= REQ_IO_STAT; op_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* allocate and init request */ /* allocate and init request */
...@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
blk_rq_init(q, rq); blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl); blk_rq_set_rl(rq, rl);
rq->cmd_flags = rw_flags | REQ_ALLOCED; req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
/* init elvpriv */ /* init elvpriv */
if (rw_flags & REQ_ELVPRIV) { if (op_flags & REQ_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) { if (unlikely(et->icq_cache && !icq)) {
if (ioc) if (ioc)
icq = ioc_create_icq(ioc, q, gfp_mask); icq = ioc_create_icq(ioc, q, gfp_mask);
...@@ -1178,7 +1180,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1178,7 +1180,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
if (ioc_batching(q, ioc)) if (ioc_batching(q, ioc))
ioc->nr_batch_requests--; ioc->nr_batch_requests--;
trace_block_getrq(q, bio, rw_flags & 1); trace_block_getrq(q, bio, op);
return rq; return rq;
fail_elvpriv: fail_elvpriv:
...@@ -1208,7 +1210,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1208,7 +1210,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* queue, but this is pretty rare. * queue, but this is pretty rare.
*/ */
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
freed_request(rl, rw_flags); freed_request(rl, op, op_flags);
/* /*
* in the very unlikely event that allocation failed and no * in the very unlikely event that allocation failed and no
...@@ -1226,7 +1228,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1226,7 +1228,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
/** /**
* get_request - get a free request * get_request - get a free request
* @q: request_queue to allocate request from * @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags * @op: REQ_OP_READ/REQ_OP_WRITE
* @op_flags: rq_flag_bits
* @bio: bio to allocate request for (can be %NULL) * @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask * @gfp_mask: allocation mask
* *
...@@ -1237,17 +1240,18 @@ static struct request *__get_request(struct request_list *rl, int rw_flags, ...@@ -1237,17 +1240,18 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
* Returns ERR_PTR on failure, with @q->queue_lock held. * Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*. * Returns request pointer on success, with @q->queue_lock *not held*.
*/ */
static struct request *get_request(struct request_queue *q, int rw_flags, static struct request *get_request(struct request_queue *q, int op,
struct bio *bio, gfp_t gfp_mask) int op_flags, struct bio *bio,
gfp_t gfp_mask)
{ {
const bool is_sync = rw_is_sync(rw_flags) != 0; const bool is_sync = rw_is_sync(op | op_flags) != 0;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
struct request_list *rl; struct request_list *rl;
struct request *rq; struct request *rq;
rl = blk_get_rl(q, bio); /* transferred to @rq on success */ rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry: retry:
rq = __get_request(rl, rw_flags, bio, gfp_mask); rq = __get_request(rl, op, op_flags, bio, gfp_mask);
if (!IS_ERR(rq)) if (!IS_ERR(rq))
return rq; return rq;
...@@ -1260,7 +1264,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, ...@@ -1260,7 +1264,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
trace_block_sleeprq(q, bio, rw_flags & 1); trace_block_sleeprq(q, bio, op);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
io_schedule(); io_schedule();
...@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, ...@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node); create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
rq = get_request(q, rw, NULL, gfp_mask); rq = get_request(q, rw, 0, NULL, gfp_mask);
if (IS_ERR(rq)) if (IS_ERR(rq))
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */ /* q->queue_lock is unlocked at this point */
...@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req) ...@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/ */
if (req->cmd_flags & REQ_ALLOCED) { if (req->cmd_flags & REQ_ALLOCED) {
unsigned int flags = req->cmd_flags; unsigned int flags = req->cmd_flags;
int op = req_op(req);
struct request_list *rl = blk_rq_rl(req); struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req)); BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req); blk_free_request(rl, req);
freed_request(rl, flags); freed_request(rl, op, flags);
blk_put_rl(rl); blk_put_rl(rl);
} }
} }
...@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{ {
const bool sync = !!(bio->bi_rw & REQ_SYNC); const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug; struct blk_plug *plug;
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req; struct request *req;
unsigned int request_count = 0; unsigned int request_count = 0;
...@@ -1772,7 +1777,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1772,7 +1777,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
* but we need to set it earlier to expose the sync flag to the * but we need to set it earlier to expose the sync flag to the
* rq allocator and io schedulers. * rq allocator and io schedulers.
*/ */
rw_flags = bio_data_dir(bio);
if (sync) if (sync)
rw_flags |= REQ_SYNC; rw_flags |= REQ_SYNC;
...@@ -1780,7 +1784,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1780,7 +1784,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
* Grab a free request. This is might sleep but can not fail. * Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked. * Returns with the queue unlocked.
*/ */
req = get_request(q, rw_flags, bio, GFP_NOIO); req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) { if (IS_ERR(req)) {
bio->bi_error = PTR_ERR(req); bio->bi_error = PTR_ERR(req);
bio_endio(bio); bio_endio(bio);
......