Commit ac5c28b4 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] loop: make bio_copy private to loop

From: Hugh Dickins <hugh@veritas.com>

bio_copy is used only by the loop driver, which already has to walk the bio
segments itself: so it makes sense to change it from a bio.c export to a loop.c
static, as a prelude to working on it there.

bio_copy itself is unchanged by this patch, with one exception: on oom
failure it must use bio_put instead of mempool_free to the static bio_pool,
which it should have been doing all along - it was leaking the veclist.
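
Concretely, the only behavioral change in the moved code is that error path.
A minimal before/after sketch of just those lines (bio_alloc takes the struct
bio from bio_pool and its bio_vec list from a separate bvec pool, so only
bio_put's destructor returns both):

oom:
	while (--i >= 0)
		__free_page(b->bi_io_vec[i].bv_page);
-	mempool_free(b, bio_pool);	/* returned only the struct bio: veclist leaked */
+	bio_put(b);			/* frees both the bio and its bio_vec list */
	return NULL;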

(Grudgingly acked by Jens)
parent 555bae04
@@ -439,6 +439,74 @@ static int loop_end_io_transfer(struct bio *bio, unsigned int bytes_done, int err)
	return 0;
}

+static struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
+{
+	struct bio *b = bio_alloc(gfp_mask, bio->bi_vcnt);
+	unsigned long flags = 0; /* gcc silly */
+	struct bio_vec *bv;
+	int i;
+
+	if (unlikely(!b))
+		return NULL;
+
+	/*
+	 * iterate iovec list and alloc pages + copy data
+	 */
+	__bio_for_each_segment(bv, bio, i, 0) {
+		struct bio_vec *bbv = &b->bi_io_vec[i];
+		char *vfrom, *vto;
+
+		bbv->bv_page = alloc_page(gfp_mask);
+		if (bbv->bv_page == NULL)
+			goto oom;
+
+		bbv->bv_len = bv->bv_len;
+		bbv->bv_offset = bv->bv_offset;
+
+		/*
+		 * if doing a copy for a READ request, no need
+		 * to memcpy page data
+		 */
+		if (!copy)
+			continue;
+
+		if (gfp_mask & __GFP_WAIT) {
+			vfrom = kmap(bv->bv_page);
+			vto = kmap(bbv->bv_page);
+		} else {
+			local_irq_save(flags);
+			vfrom = kmap_atomic(bv->bv_page, KM_BIO_SRC_IRQ);
+			vto = kmap_atomic(bbv->bv_page, KM_BIO_DST_IRQ);
+		}
+
+		memcpy(vto + bbv->bv_offset, vfrom + bv->bv_offset, bv->bv_len);
+
+		if (gfp_mask & __GFP_WAIT) {
+			kunmap(bbv->bv_page);
+			kunmap(bv->bv_page);
+		} else {
+			kunmap_atomic(vto, KM_BIO_DST_IRQ);
+			kunmap_atomic(vfrom, KM_BIO_SRC_IRQ);
+			local_irq_restore(flags);
+		}
+	}
+
+	b->bi_sector = bio->bi_sector;
+	b->bi_bdev = bio->bi_bdev;
+	b->bi_rw = bio->bi_rw;
+
+	b->bi_vcnt = bio->bi_vcnt;
+	b->bi_size = bio->bi_size;
+
+	return b;
+
+oom:
+	while (--i >= 0)
+		__free_page(b->bi_io_vec[i].bv_page);
+
+	bio_put(b);
+	return NULL;
+}
+
static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh)
{
	struct bio *bio;
...
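
For context on the copy flag (a sketch, not taken verbatim from loop.c): only a
WRITE needs its page data copied up front, since a READ's pages are filled in by
the transfer function at completion time, so a caller such as loop_get_buffer is
expected to request the copy only for writes, along these lines:

	/* sketch: copy page data only when writing to the backing file */
	bio = bio_copy(rbh, GFP_NOIO, rbh->bi_rw & WRITE);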
@@ -258,84 +258,6 @@ struct bio *bio_clone(struct bio *bio, int gfp_mask)
	return b;
}

-/**
- * bio_copy - create copy of a bio
- * @bio: bio to copy
- * @gfp_mask: allocation priority
- * @copy: copy data to allocated bio
- *
- * Create a copy of a &bio. Caller will own the returned bio and
- * the actual data it points to. Reference count of returned
- * bio will be one.
- */
-struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
-{
-	struct bio *b = bio_alloc(gfp_mask, bio->bi_vcnt);
-	unsigned long flags = 0; /* gcc silly */
-	struct bio_vec *bv;
-	int i;
-
-	if (unlikely(!b))
-		return NULL;
-
-	/*
-	 * iterate iovec list and alloc pages + copy data
-	 */
-	__bio_for_each_segment(bv, bio, i, 0) {
-		struct bio_vec *bbv = &b->bi_io_vec[i];
-		char *vfrom, *vto;
-
-		bbv->bv_page = alloc_page(gfp_mask);
-		if (bbv->bv_page == NULL)
-			goto oom;
-
-		bbv->bv_len = bv->bv_len;
-		bbv->bv_offset = bv->bv_offset;
-
-		/*
-		 * if doing a copy for a READ request, no need
-		 * to memcpy page data
-		 */
-		if (!copy)
-			continue;
-
-		if (gfp_mask & __GFP_WAIT) {
-			vfrom = kmap(bv->bv_page);
-			vto = kmap(bbv->bv_page);
-		} else {
-			local_irq_save(flags);
-			vfrom = kmap_atomic(bv->bv_page, KM_BIO_SRC_IRQ);
-			vto = kmap_atomic(bbv->bv_page, KM_BIO_DST_IRQ);
-		}
-
-		memcpy(vto + bbv->bv_offset, vfrom + bv->bv_offset, bv->bv_len);
-
-		if (gfp_mask & __GFP_WAIT) {
-			kunmap(bbv->bv_page);
-			kunmap(bv->bv_page);
-		} else {
-			kunmap_atomic(vto, KM_BIO_DST_IRQ);
-			kunmap_atomic(vfrom, KM_BIO_SRC_IRQ);
-			local_irq_restore(flags);
-		}
-	}
-
-	b->bi_sector = bio->bi_sector;
-	b->bi_bdev = bio->bi_bdev;
-	b->bi_rw = bio->bi_rw;
-
-	b->bi_vcnt = bio->bi_vcnt;
-	b->bi_size = bio->bi_size;
-
-	return b;
-
-oom:
-	while (--i >= 0)
-		__free_page(b->bi_io_vec[i].bv_page);
-
-	mempool_free(b, bio_pool);
-	return NULL;
-}
-
/**
 * bio_get_nr_vecs - return approx number of vecs
 * @bdev: I/O target
...
@@ -905,7 +827,6 @@ EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
-EXPORT_SYMBOL(bio_copy);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
...
@@ -235,7 +235,6 @@ extern inline int bio_hw_segments(struct request_queue *, struct bio *);
extern inline void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, int);
-extern struct bio *bio_copy(struct bio *, int, int);
extern inline void bio_init(struct bio *);
...