Commit 450991bc authored by Nick Piggin, committed by Linus Torvalds

[PATCH] blk: __make_request efficiency

When the elevator cannot merge the request, don't retake the queue lock and
retry the merge after allocating a new request.

Instead, assume that the chance of a merge remains slim, and since we've now
done most of the work of allocating a request, we may as well just go with it.

Also get rid of the GFP_ATOMIC allocation: the block layer now has working
mempools, so let's save atomic memory for things like networking.

Lastly, in get_request_wait, make an initial get_request call before going into
the waitqueue.  This is reported to improve efficiency.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 69f63c5c
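Before reading the diff, a condensed sketch of the control-flow change in
__make_request() may help (identifiers match the kernel code below; merging
details, READA handling, and error paths are elided, so this is an outline
rather than the committed code):

	/* Old flow: opportunistic GFP_ATOMIC allocation, then loop back
	 * and retry the merge with the queue lock retaken. */
again:
	spin_lock_irq(q->queue_lock);
	/* ... attempt elevator merge; on ELEVATOR_NO_MERGE fall through ... */
	spin_unlock_irq(q->queue_lock);
	freereq = get_request(q, rw, bio, GFP_ATOMIC);
	if (!freereq)
		freereq = get_request_wait(q, rw, bio);	/* may sleep */
	goto again;

	/* New flow: one lock round trip; drop the lock, block until the
	 * mempool-backed allocation succeeds, and commit to the request. */
	spin_lock_irq(q->queue_lock);
	/* ... attempt elevator merge; on ELEVATOR_NO_MERGE fall through ... */
	spin_unlock_irq(q->queue_lock);
	req = get_request_wait(q, rw, bio);	/* may sleep, cannot fail */

A request that became mergeable while we slept is simply queued as-is; the
commit bets that this case is rare and that the elevator copes with it.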
@@ -1971,10 +1971,11 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 static struct request *get_request_wait(request_queue_t *q, int rw,
 					struct bio *bio)
 {
+	DEFINE_WAIT(wait);
 	struct request *rq;
 
-	do {
-		DEFINE_WAIT(wait);
+	rq = get_request(q, rw, bio, GFP_NOIO);
+	while (!rq) {
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
@@ -1999,7 +2000,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 			put_io_context(ioc);
 		}
 		finish_wait(&rl->wait[rw], &wait);
-	} while (!rq);
+	}
 
 	return rq;
 }
@@ -2521,7 +2522,7 @@ EXPORT_SYMBOL(blk_attempt_remerge);
 
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
-	struct request *req, *freereq = NULL;
+	struct request *req;
 	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
 	unsigned short prio;
 	sector_t sector;
@@ -2549,14 +2550,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		goto end_io;
 	}
 
-again:
 	spin_lock_irq(q->queue_lock);
 
-	if (elv_queue_empty(q)) {
-		blk_plug_device(q);
-		goto get_rq;
-	}
-	if (barrier)
+	if (unlikely(barrier) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -2601,40 +2597,23 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		elv_merged_request(q, req);
 		goto out;
 
-		/*
-		 * elevator says don't/can't merge. get new request
-		 */
-		case ELEVATOR_NO_MERGE:
-			break;
-
+		/* ELV_NO_MERGE: elevator says don't/can't merge. */
 		default:
-			printk("elevator returned crap (%d)\n", el_ret);
-			BUG();
+			;
 	}
 
+get_rq:
 	/*
-	 * Grab a free request from the freelist - if that is empty, check
-	 * if we are doing read ahead and abort instead of blocking for
-	 * a free slot.
+	 * Grab a free request. This may sleep but cannot fail.
+	 */
+	spin_unlock_irq(q->queue_lock);
+	req = get_request_wait(q, rw, bio);
+
+	/*
+	 * After dropping the lock and possibly sleeping here, our request
+	 * may now be mergeable after it had proven unmergeable (above).
+	 * We don't worry about that case for efficiency. It won't happen
+	 * often, and the elevators are able to handle it.
 	 */
-get_rq:
-	if (freereq) {
-		req = freereq;
-		freereq = NULL;
-	} else {
-		spin_unlock_irq(q->queue_lock);
-		if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
-			/*
-			 * READA bit set
-			 */
-			err = -EWOULDBLOCK;
-			if (bio_rw_ahead(bio))
-				goto end_io;
-
-			freereq = get_request_wait(q, rw, bio);
-		}
-		goto again;
-	}
 
 	req->flags |= REQ_CMD;
@@ -2663,10 +2642,11 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
 
+	spin_lock_irq(q->queue_lock);
+	if (elv_queue_empty(q))
+		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (freereq)
-		__blk_put_request(q, freereq);
 	if (sync)
 		__generic_unplug_device(q);
...
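A closing note on the get_request_wait() change: trying get_request() once
before any waitqueue setup means the common, uncontended case never touches
prepare_to_wait_exclusive()/finish_wait() at all. The same pattern applies
outside the kernel; below is a minimal userspace analogue using POSIX threads
(struct pool and the pool_* functions are invented for illustration and are
not kernel or libc APIs):

#include <pthread.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  wait;	/* plays the role of rl->wait[rw] */
	int             free;	/* free request slots */
};

/* Non-blocking attempt: the analogue of get_request(). */
static int pool_get_nonblock(struct pool *p)
{
	int ok;

	pthread_mutex_lock(&p->lock);
	ok = p->free > 0;
	if (ok)
		p->free--;
	pthread_mutex_unlock(&p->lock);
	return ok;
}

/* Blocking attempt: the analogue of the new get_request_wait().
 * Try once optimistically; fall into the wait loop only on failure. */
static void pool_get_wait(struct pool *p)
{
	if (pool_get_nonblock(p))	/* fast path: no waitqueue work */
		return;

	pthread_mutex_lock(&p->lock);
	while (p->free == 0)		/* re-check under the lock */
		pthread_cond_wait(&p->wait, &p->lock);
	p->free--;
	pthread_mutex_unlock(&p->lock);
}

/* Release: the analogue of returning a request to the mempool. */
static void pool_put(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->free++;
	pthread_cond_signal(&p->wait);	/* wake one waiter, like the
					 * exclusive wakeup in the kernel */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, 1 };

	pool_get_wait(&p);	/* takes the fast path: a slot is free */
	pool_put(&p);
	return 0;
}

Because the slow path re-checks p->free under the mutex, a wakeup arriving
between the failed fast path and pthread_cond_wait() cannot be lost, mirroring
the prepare_to_wait_exclusive()/get_request() ordering in the kernel code.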