Commit 14ff27c5 authored by Jens Axboe

[PATCH] queue merge_bvec_fn() changes

Make merge_bvec_fn() return number of bytes we can accept at a given
offset, instead of a bool.
parent e00cb941
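
The change in one sentence: merge_bvec_fn() used to answer "may this vec be merged?" with non-zero meaning no, and now answers "how many bytes fit at this offset?", with callers treating anything below the vec's length as a refusal. A minimal userspace sketch of the new convention, using stand-in types rather than the real kernel structures (max_bytes is a made-up device limit, not a kernel field):

	#include <stdio.h>

	/* Stand-ins for the kernel's bio/bio_vec; only the fields used here. */
	struct bio_vec { unsigned int bv_len; };
	struct bio     { unsigned int bi_size; };

	/*
	 * New-style merge_bvec_fn: return the number of bytes acceptable
	 * for this vec at the bio's current offset.
	 */
	static int merge_bvec(struct bio *bio, struct bio_vec *vec,
			      unsigned int max_bytes)
	{
		if (bio->bi_size + vec->bv_len <= max_bytes)
			return vec->bv_len;		/* the whole vec fits */
		return max_bytes - bio->bi_size;	/* only partial room */
	}

	int main(void)
	{
		struct bio bio = { 3072 };
		struct bio_vec vec = { 4096 };
		int room = merge_bvec(&bio, &vec, 4096);

		/* Caller pattern used throughout this patch: all or nothing. */
		if (room < (int)vec.bv_len)
			printf("reject: only %d of %u bytes fit\n",
			       room, vec.bv_len);
		return 0;
	}
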
@@ -52,19 +52,21 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  * @bio: the buffer head that's been built up so far
  * @biovec: the request that could be merged to it.
  *
- * Return 1 if the merge is not permitted (because the
- * result would cross a device boundary), 0 otherwise.
+ * Return amount of bytes we can take at this offset
  */
 static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	dev_info_t *dev0, *dev1;
+	dev_info_t *dev0;
+	int maxsectors, bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;

 	dev0 = which_dev(mddev, bio->bi_sector);
-	dev1 = which_dev(mddev, bio->bi_sector +
-			((bio->bi_size + biovec->bv_len - 1) >> 9));
+	maxsectors = (dev0->size << 1) - (bio->bi_sector - (dev0->offset<<1));

-	return dev0 != dev1;
+	if (bio_sectors <= maxsectors)
+		return biovec->bv_len;
+
+	return (maxsectors << 9) - bio->bi_size;
 }

 static int linear_run (mddev_t *mddev)
......
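
Concretely, with made-up numbers for the new linear_mergeable_bvec() above (md sizes and offsets are kept in 1 KB units, so << 1 converts KB to 512-byte sectors and << 9 converts sectors to bytes):

	dev0->size = 4 KB   ->  dev0->size << 1 = 8 sectors
	dev0->offset = 0, bio->bi_sector = 6
	maxsectors  = 8 - (6 - 0) = 2 sectors left on dev0
	bio->bi_size = 512, biovec->bv_len = 1024
	bio_sectors = (512 + 1024) >> 9 = 3 > maxsectors
	return (2 << 9) - 512 = 512, i.e. only 512 of the 1024 bytes fit
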
@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
  * @bio: the buffer head that's been built up so far
  * @biovec: the request that could be merged to it.
  *
- * Return 1 if the merge is not permitted (because the
- * result would cross a chunk boundary), 0 otherwise.
+ * Return amount of bytes we can accept at this offset
  */
 static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 	block = bio->bi_sector >> 1;
 	bio_sz = (bio->bi_size + biovec->bv_len) >> 10;

-	return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
+	return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
 }

 static int raid0_run (mddev_t *mddev)
......
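
The raid0 version above does the same accounting in 1 KB units (>> 1 takes sectors to KB, << 10 takes KB back to bytes). With hypothetical numbers:

	chunk_size = 64 KB, bio->bi_sector = 96  ->  block = 48 KB
	offset into the chunk: block & (chunk_size - 1) = 48
	bio->bi_size = 4096, biovec->bv_len = 4096  ->  bio_sz = 8 KB
	return (64 - (48 + 8)) << 10 = 8192 bytes of room

8192 >= bv_len, so the caller accepts the vec; a result below bv_len would make it stop and submit.
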
@@ -354,7 +354,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	request_queue_t *q = bdev_get_queue(bdev);
 	int nr_pages;

-	nr_pages = q->max_sectors >> (PAGE_SHIFT - 9);
+	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (nr_pages > q->max_phys_segments)
 		nr_pages = q->max_phys_segments;
 	if (nr_pages > q->max_hw_segments)
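
The bio_get_nr_vecs() change swaps a round-down for a round-up, which matters whenever max_sectors is not page-aligned. Assuming 4 KB pages (PAGE_SHIFT = 12):

	q->max_sectors = 9 sectors = 4608 bytes
	old: 9 >> (12 - 9) = 1 page   (but 4608 bytes span 2 pages)
	new: (4608 + 4095) >> 12 = 2 pages
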
@@ -385,13 +385,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * cloned bio must not modify vec list
 	 */
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
-		return 1;
+		return 0;

 	if (bio->bi_vcnt >= bio->bi_max_vecs)
-		return 1;
+		return 0;

 	if (((bio->bi_size + len) >> 9) > q->max_sectors)
-		return 1;
+		return 0;

 	/*
 	 * we might loose a segment or two here, but rather that than
@@ -404,7 +404,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	if (fail_segments) {
 		if (retried_segments)
-			return 1;
+			return 0;

 		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 		retried_segments = 1;
@@ -425,18 +425,24 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * depending on offset), it can specify a merge_bvec_fn in the
 	 * queue to get further control
 	 */
-	if (q->merge_bvec_fn && q->merge_bvec_fn(q, bio, bvec)) {
-		bvec->bv_page = NULL;
-		bvec->bv_len = 0;
-		bvec->bv_offset = 0;
-		return 1;
+	if (q->merge_bvec_fn) {
+		/*
+		 * merge_bvec_fn() returns number of bytes it can accept
+		 * at this offset
+		 */
+		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+			bvec->bv_page = NULL;
+			bvec->bv_len = 0;
+			bvec->bv_offset = 0;
+			return 0;
+		}
 	}

 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
 	bio->bi_size += len;
-	return 0;
+	return len;
 }

 /**
/**
......
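
Since bio_add_page() now returns the number of bytes added (len on success, 0 on any failure), every caller below moves from "non-zero means failure" to "less than len means the page did not fully fit". A sketch of the resulting caller shape; submit_and_realloc() is a stand-in helper, not a kernel function:

	/* add pages until one no longer fits, then submit and retry */
	for (i = 0; i < nr_pages; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE) {
			bio = submit_and_realloc(bio);
			i--;	/* retry this page on the fresh bio */
		}
	}
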
@@ -417,12 +417,12 @@ dio_bio_add_page(struct dio *dio, struct page *page,
 	/* Take a ref against the page each time it is placed into a BIO */
 	page_cache_get(page);

-	if (bio_add_page(dio->bio, page, bv_len, bv_offset)) {
+	if (bio_add_page(dio->bio, page, bv_len, bv_offset) < bv_len) {
 		dio_bio_submit(dio);
 		ret = dio_new_bio(dio, blkno);
 		if (ret == 0) {
 			ret = bio_add_page(dio->bio, page, bv_len, bv_offset);
-			BUG_ON(ret != 0);
+			BUG_ON(ret < bv_len);
 		} else {
 			/* The page didn't make it into a BIO */
 			page_cache_release(page);
......
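
Note the stronger assertion in the direct-io path: once the full bio has been submitted and dio_new_bio() has opened an empty one, the same page must fit, so the retried bio_add_page() may BUG on a short return instead of handling it.
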
@@ -176,6 +176,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	unsigned first_hole = blocks_per_page;
 	struct block_device *bdev = NULL;
 	struct buffer_head bh;
+	int length;

 	if (page_has_buffers(page))
 		goto confused;
@@ -233,7 +234,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		goto confused;
 	}

-	if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
+	length = first_hole << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(READ, bio);
 		goto alloc_new;
 	}
@@ -334,6 +336,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	int boundary = 0;
 	sector_t boundary_block = 0;
 	struct block_device *boundary_bdev = NULL;
+	int length;

 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);
@@ -467,7 +470,8 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 		try_to_free_buffers(page);
 	}

-	if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
+	length = first_unmapped << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(WRITE, bio);
 		goto alloc_new;
 	}
......
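
In both mpage paths the shifted length is hoisted into a local so that the exact value passed to bio_add_page() is also the one compared against its return.
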
@@ -1448,7 +1448,7 @@ pagebuf_iorequest( /* start real I/O */
 		if (nbytes > size)
 			nbytes = size;

-		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset))
+		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset) < nbytes)
 			break;

 		offset = 0;
......