Commit 52d9e675 authored by Tejun Heo, committed by Jens Axboe

[BLOCK] ll_rw_blk: separate out bio init part from __make_request

Separate out bio initialization part from __make_request.  It
will be used by the following blk_ordered_reimpl.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 8ffdc655
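
For context, the point of the split is that the request-from-bio setup no longer has to live inside __make_request and can be called from other request-building paths (such as the blk_ordered reimplementation referenced above). The sketch below is illustrative only and not part of this commit: the wrapper name blk_example_rq_from_bio is hypothetical, get_request_wait() is assumed to be the existing request allocator in ll_rw_blk.c, and init_request_from_bio() is the helper added in the diff below.

/*
 * Illustrative sketch, not part of this commit: with the helper split
 * out, a hypothetical caller could build a request for a bio outside
 * of __make_request() roughly like this (error handling omitted).
 */
static struct request *blk_example_rq_from_bio(request_queue_t *q, struct bio *bio)
{
	/* assumed allocator in ll_rw_blk.c: may sleep, returns with the queue unlocked */
	struct request *rq = get_request_wait(q, bio_data_dir(bio), bio);

	/* copy command type, flags, sector and segment counts from the bio */
	init_request_from_bio(rq, bio);

	return rq;
}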
@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -1667,8 +1669,6 @@ static int blk_init_free_list(request_queue_t *q)
 	return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
@@ -2659,6 +2659,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+	req->flags |= REQ_CMD;
+
+	/*
+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 */
+	if (bio_rw_ahead(bio) || bio_failfast(bio))
+		req->flags |= REQ_FAILFAST;
+
+	/*
+	 * REQ_BARRIER implies no merging, but lets make it explicit
+	 */
+	if (unlikely(bio_barrier(bio)))
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+	req->errors = 0;
+	req->hard_sector = req->sector = bio->bi_sector;
+	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+	req->nr_phys_segments = bio_phys_segments(req->q, bio);
+	req->nr_hw_segments = bio_hw_segments(req->q, bio);
+	req->buffer = bio_data(bio);	/* see ->buffer comment above */
+	req->waiting = NULL;
+	req->bio = req->biotail = bio;
+	req->ioprio = bio_prio(bio);
+	req->rq_disk = bio->bi_bdev->bd_disk;
+	req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
@@ -2754,33 +2784,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	 * We don't worry about that case for efficiency. It won't happen
 	 * often, and the elevators are able to handle it.
 	 */
+	init_request_from_bio(req, bio);
-	req->flags |= REQ_CMD;
-
-	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
-
-	/*
-	 * REQ_BARRIER implies no merging, but lets make it explicit
-	 */
-	if (unlikely(barrier))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-	req->errors = 0;
-	req->hard_sector = req->sector = sector;
-	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-	req->nr_phys_segments = bio_phys_segments(q, bio);
-	req->nr_hw_segments = bio_hw_segments(q, bio);
-	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->waiting = NULL;
-	req->bio = req->biotail = bio;
-	req->ioprio = prio;
-	req->rq_disk = bio->bi_bdev->bd_disk;
-	req->start_time = jiffies;
 
 	spin_lock_irq(q->queue_lock);
 	if (elv_queue_empty(q))
...