Commit cb34e057 authored by Kent Overstreet

block: Convert some code to bio_for_each_segment_all()

More prep work for immutable bvecs:

A few places in the code were either open coding or using the wrong
version - fix.

After we introduce the bvec iter, it'll no longer be possible to modify
the biovec through bio_for_each_segment() - it no longer gives you a
pointer to the current bvec; you pass in a struct bio_vec (not a
pointer), which is updated with what the current biovec would be (taking
into account bi_bvec_done and bi_size).
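
Roughly, read-only iteration then looks like this (illustrative sketch
only, using the planned bio_for_each_segment()/bvec_iter form; bv is a
copy, so writes to it never reach bi_io_vec):

	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		/* bv is a snapshot of the current segment, adjusted for
		 * bi_bvec_done/bi_size; changing bv changes nothing in
		 * the bio itself */
		flush_dcache_page(bv.bv_page);
	}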

So because of that it's more worthwhile to be consistent about
bio_for_each_segment()/bio_for_each_segment_all() usage.
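
A bio owner that really needs to touch the bvecs themselves (as in the
conversions below) keeps the _all variant, which walks bi_io_vec
directly through a pointer - a minimal sketch modelled on the
bio_release_pages() conversion in this patch:

	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		/* bvec points into bio->bi_io_vec, so the owner may
		 * drop the page and clear the slot */
		put_page(bvec->bv_page);
		bvec->bv_page = NULL;
	}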
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: NeilBrown <neilb@suse.de>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Alexander Viro <viro@zeniv.linux.org.uk>
parent d74c6d51
drivers/md/dm-crypt.c
@@ -858,8 +858,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	unsigned int i;
 	struct bio_vec *bv;
 
-	for (i = 0; i < clone->bi_vcnt; i++) {
-		bv = bio_iovec_idx(clone, i);
+	bio_for_each_segment_all(bv, clone, i) {
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
 		bv->bv_page = NULL;
drivers/md/raid1.c
@@ -925,7 +925,7 @@ static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 	if (unlikely(!bvecs))
 		return;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		bvecs[i] = *bvec;
 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
 		if (unlikely(!bvecs[i].bv_page))
@@ -1284,12 +1284,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			struct bio_vec *bvec;
 			int j;
 
-			/* Yes, I really want the '__' version so that
-			 * we clear any unused pointer in the io_vec, rather
-			 * than leave them unchanged. This is important
-			 * because when we come to free the pages, we won't
-			 * know the original bi_idx, so we just free
-			 * them all
+			/*
+			 * We trimmed the bio, so _all is legit
 			 */
 			bio_for_each_segment_all(bvec, mbio, j)
 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
fs/bio.c
@@ -1548,11 +1548,11 @@ EXPORT_SYMBOL(bio_copy_kern);
  */
 void bio_set_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page && !PageCompound(page))
 			set_page_dirty_lock(page);
@@ -1561,11 +1561,11 @@ void bio_set_pages_dirty(struct bio *bio)
 
 static void bio_release_pages(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page)
 			put_page(page);
@@ -1614,16 +1614,16 @@ static void bio_dirty_fn(struct work_struct *work)
 
 void bio_check_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int nr_clean_pages = 0;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (PageDirty(page) || PageCompound(page)) {
 			page_cache_release(page);
-			bvec[i].bv_page = NULL;
+			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
 		}
fs/direct-io.c
@@ -441,8 +441,8 @@ static struct bio *dio_await_one(struct dio *dio)
 static int dio_bio_complete(struct dio *dio, struct bio *bio)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec;
-	int page_no;
+	struct bio_vec *bvec;
+	unsigned i;
 
 	if (!uptodate)
 		dio->io_error = -EIO;
@@ -450,8 +450,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 	if (dio->is_async && dio->rw == READ) {
 		bio_check_pages_dirty(bio);	/* transfers ownership */
 	} else {
-		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
-			struct page *page = bvec[page_no].bv_page;
+		bio_for_each_segment_all(bvec, bio, i) {
+			struct page *page = bvec->bv_page;
 
 			if (dio->rw == READ && !PageCompound(page))
 				set_page_dirty_lock(page);
mm/bounce.c
@@ -231,7 +231,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 bounce:
 	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
 
-	bio_for_each_segment(to, bio, i) {
+	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
 		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)