Commit 399254aa authored by Jens Axboe

block: add BIO_NO_PAGE_REF flag

If bio_iov_iter_get_pages() is called on an iov_iter that is flagged
with NO_REF, then we don't need to add a page reference for the pages
that we add.

Add BIO_NO_PAGE_REF to track this in the bio, so IO completion knows
not to drop a reference to these pages.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 875f1d07
...@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter) ...@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
size = bio_add_page(bio, bv->bv_page, len, size = bio_add_page(bio, bv->bv_page, len,
bv->bv_offset + iter->iov_offset); bv->bv_offset + iter->iov_offset);
if (size == len) { if (size == len) {
if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
struct page *page; struct page *page;
int i; int i;
/*
* For the normal O_DIRECT case, we could skip grabbing this
* reference and then not have to put them again when IO
* completes. But this breaks some in-kernel users, like
* splicing to/from a loop device, where we release the pipe
* pages unconditionally. If we can fix that case, we can
* get rid of the get here and the need to call
* bio_release_pages() at IO completion time.
*/
mp_bvec_for_each_page(page, bv, i) mp_bvec_for_each_page(page, bv, i)
get_page(page); get_page(page);
}
iov_iter_advance(iter, size); iov_iter_advance(iter, size);
return 0; return 0;
} }
...@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) ...@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
* This takes either an iterator pointing to user memory, or one pointing to * This takes either an iterator pointing to user memory, or one pointing to
* kernel pages (BVEC iterator). If we're adding user pages, we pin them and * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
* map them into the kernel. On IO completion, the caller should put those * map them into the kernel. On IO completion, the caller should put those
* pages. For now, when adding kernel pages, we still grab a reference to the * pages. If we're adding kernel pages, and the caller told us it's safe to
* page. This isn't strictly needed for the common case, but some call paths * do so, we just have to add the pages to the bio directly. We don't grab an
* end up releasing pages from eg a pipe and we can't easily control these. * extra reference to those pages (the user should already have that), and we
* See comment in __bio_iov_bvec_add_pages(). * don't put the page on IO completion. The caller needs to check if the bio is
* flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
* released.
* *
* The function tries, but does not guarantee, to pin as many pages as * The function tries, but does not guarantee, to pin as many pages as
* fit into the bio, or are requested in *iter, whatever is smaller. If * fit into the bio, or are requested in *iter, whatever is smaller. If
...@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) ...@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
const bool is_bvec = iov_iter_is_bvec(iter); const bool is_bvec = iov_iter_is_bvec(iter);
unsigned short orig_vcnt = bio->bi_vcnt; unsigned short orig_vcnt = bio->bi_vcnt;
/*
* If this is a BVEC iter, then the pages are kernel pages. Don't
* release them on IO completion, if the caller asked us to.
*/
if (is_bvec && iov_iter_bvec_no_ref(iter))
bio_set_flag(bio, BIO_NO_PAGE_REF);
do { do {
int ret; int ret;
...@@ -1696,6 +1699,7 @@ static void bio_dirty_fn(struct work_struct *work) ...@@ -1696,6 +1699,7 @@ static void bio_dirty_fn(struct work_struct *work)
next = bio->bi_private; next = bio->bi_private;
bio_set_pages_dirty(bio); bio_set_pages_dirty(bio);
if (!bio_flagged(bio, BIO_NO_PAGE_REF))
bio_release_pages(bio); bio_release_pages(bio);
bio_put(bio); bio_put(bio);
} }
...@@ -1713,6 +1717,7 @@ void bio_check_pages_dirty(struct bio *bio) ...@@ -1713,6 +1717,7 @@ void bio_check_pages_dirty(struct bio *bio)
goto defer; goto defer;
} }
if (!bio_flagged(bio, BIO_NO_PAGE_REF))
bio_release_pages(bio); bio_release_pages(bio);
bio_put(bio); bio_put(bio);
return; return;
......
...@@ -336,12 +336,14 @@ static void blkdev_bio_end_io(struct bio *bio) ...@@ -336,12 +336,14 @@ static void blkdev_bio_end_io(struct bio *bio)
if (should_dirty) { if (should_dirty) {
bio_check_pages_dirty(bio); bio_check_pages_dirty(bio);
} else { } else {
if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
struct bvec_iter_all iter_all;
struct bio_vec *bvec; struct bio_vec *bvec;
int i; int i;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, i, iter_all) bio_for_each_segment_all(bvec, bio, i, iter_all)
put_page(bvec->bv_page); put_page(bvec->bv_page);
}
bio_put(bio); bio_put(bio);
} }
} }
......
...@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio) ...@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
if (should_dirty) { if (should_dirty) {
bio_check_pages_dirty(bio); bio_check_pages_dirty(bio);
} else { } else {
if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
struct bvec_iter_all iter_all;
struct bio_vec *bvec; struct bio_vec *bvec;
int i; int i;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, i, iter_all) bio_for_each_segment_all(bvec, bio, i, iter_all)
put_page(bvec->bv_page); put_page(bvec->bv_page);
}
bio_put(bio); bio_put(bio);
} }
} }
......
...@@ -215,6 +215,7 @@ struct bio { ...@@ -215,6 +215,7 @@ struct bio {
/* /*
* bio flags * bio flags
*/ */
#define BIO_NO_PAGE_REF 0 /* don't put release vec pages */
#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ #define BIO_SEG_VALID 1 /* bi_phys_segments valid */
#define BIO_CLONED 2 /* doesn't own data */ #define BIO_CLONED 2 /* doesn't own data */
#define BIO_BOUNCED 3 /* bio is a bounce bio */ #define BIO_BOUNCED 3 /* bio is a bounce bio */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.