Commit 15e9369d authored by Jens Axboe

[PATCH] bio splitting

So here it is, easy split support for md and dm. Neil, the changes over
your version are merely:

- Make a global bio split pool instead of requiring device setup of one.
  It wastes 8 * sizeof(struct bio_pair) of RAM, but for 2.6 at least
  this has to be core functionality.

- Various style changes to follow the kernel coding guidelines.
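
For illustration, a minimal caller sketch, not part of this patch: a helper
that splits a single-page bio first_sectors in and submits both halves.
my_split_pool and example_split_and_submit are assumed names; a real caller
(e.g. an md personality) would pick the split point from its chunk geometry.

static mempool_t *my_split_pool;	/* assumed: set up by the caller */

static int example_split_and_submit(struct bio *bio, int first_sectors)
{
	struct bio_pair *bp = bio_split(bio, my_split_pool, first_sectors);

	if (!bp)
		return -ENOMEM;

	/*
	 * bp->cnt starts at 3: one reference per half plus this one;
	 * the master bio's bi_end_io runs only after all three drop.
	 */
	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);

	return 0;
}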
parent 4a2b3e53
@@ -33,6 +33,13 @@ static kmem_cache_t *bio_slab;
#define BIOVEC_NR_POOLS 6

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 8
static mempool_t *bio_split_pool;

struct biovec_pool {
	int nr_vecs;
	char *name;
@@ -734,6 +741,91 @@ void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
	bio->bi_end_io(bio, bytes_done, error);
}

void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, master->bi_size, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}
static int bio_pair_end_1(struct bio *bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (bi->bi_size)
		return 1;

	if (err)
		bp->error = err;

	bio_pair_release(bp);
	return 0;
}

static int bio_pair_end_2(struct bio *bi, unsigned int done, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (bi->bi_size)
		return 1;

	if (err)
		bp->error = err;

	bio_pair_release(bp);
	return 0;
}
/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);

	if (!bp)
		return bp;

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);

	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = pool;

	return bp;
}
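
For concreteness, a worked example with assumed numbers, not taken from the
patch: a 4 KiB single-page bio starting at sector 1020, split with
first_sectors = 4:

	/*
	 * bio1: bi_sector = 1020, bi_size = 4 << 9 = 2048, bv1.bv_len = 2048
	 * bio2: bi_sector = 1024, bi_size = 2048, bv2.bv_offset += 2048
	 *
	 * Both halves still point at the same page; only the sector,
	 * offset and length bookkeeping differs, which is why the two
	 * BUG_ONs above insist on a single-vector, unstarted bio.
	 */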
static void *bio_pair_alloc(int gfp_flags, void *data)
{
	return kmalloc(sizeof(struct bio_pair), gfp_flags);
}

static void bio_pair_free(void *bp, void *data)
{
	kfree(bp);
}
static void __init biovec_init_pools(void)
{
	int i, size, megabytes, pool_entries = BIO_POOL_SIZE;
@@ -800,6 +892,10 @@ static int __init init_bio(void)
	biovec_init_pools();

	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, bio_pair_alloc,
					bio_pair_free, NULL);
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}
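
Note on sizing: mempool_create() preallocates BIO_SPLIT_ENTRIES (8) bio_pairs
up front; this is the 8 * sizeof(struct bio_pair) of RAM the commit message
mentions. Since bio_split() allocates from the pool with GFP_NOIO, a split can
always make forward progress under memory pressure once an in-flight pair
completes and is returned to the pool, which is what the "basically we just
need to survive" comment above is getting at.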
@@ -818,3 +914,5 @@ EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
@@ -22,6 +22,7 @@
#include <linux/kdev_t.h>
#include <linux/highmem.h>
#include <linux/mempool.h>

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
@@ -202,6 +203,27 @@ struct bio {
*/
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary.
 *
 * The address of the master bio is stored in bio1.bi_private.
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private.
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
	atomic_t	cnt;
	int		error;
};

extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);

extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);