Commit 9476f39d authored by Lars Ellenberg, committed by Philipp Reisner

drbd: introduce a bio_set to allocate housekeeping bios from

Don't rely on availability of bios from the global fs_bio_set,
we should use our own bio_set for meta data IO.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 3c2f7a85
...@@ -115,7 +115,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, ...@@ -115,7 +115,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
rw |= REQ_FUA | REQ_FLUSH; rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC; rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev; bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector; bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size); ok = (bio_add_page(bio, page, size, 0) == size);
......
...@@ -953,8 +953,7 @@ static void bm_async_io_complete(struct bio *bio, int error) ...@@ -953,8 +953,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{ {
/* we are process context. we always get a bio */ struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct bio *bio = bio_alloc(GFP_NOIO, 1);
struct drbd_conf *mdev = ctx->mdev; struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap; struct drbd_bitmap *b = mdev->bitmap;
struct page *page; struct page *page;
......
...@@ -1522,6 +1522,12 @@ extern wait_queue_head_t drbd_pp_wait; ...@@ -1522,6 +1522,12 @@ extern wait_queue_head_t drbd_pp_wait;
#define DRBD_MIN_POOL_PAGES 128 #define DRBD_MIN_POOL_PAGES 128
extern mempool_t *drbd_md_io_page_pool; extern mempool_t *drbd_md_io_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
extern struct bio_set *drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
extern rwlock_t global_state_lock; extern rwlock_t global_state_lock;
extern struct drbd_conf *drbd_new_device(unsigned int minor); extern struct drbd_conf *drbd_new_device(unsigned int minor);
......
...@@ -140,6 +140,7 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ ...@@ -140,6 +140,7 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool; mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool; mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool; mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because: /* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first. 1) I want to hand out the pre-allocated objects first.
...@@ -160,6 +161,25 @@ static const struct block_device_operations drbd_ops = { ...@@ -160,6 +161,25 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release, .release = drbd_release,
}; };
/* bi_destructor callback for bios handed out by bio_alloc_drbd():
 * returns the bio to our private drbd_md_io_bio_set rather than
 * the global fs_bio_set it would otherwise be freed to. */
static void bio_destructor_drbd(struct bio *bio)
{
bio_free(bio, drbd_md_io_bio_set);
}
/* Allocate a single-segment bio for DRBD-internal (housekeeping /
 * meta-data) IO from our private bio_set, so we never depend on the
 * global fs_bio_set being able to satisfy us.
 *
 * If the private set was never created (drbd_md_io_bio_set == NULL,
 * e.g. bioset_create() not available at build time), fall back to a
 * plain bio_alloc(). Returns NULL on allocation failure.
 */
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *b;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	b = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (b)
		/* make sure it is freed back into our bio_set */
		b->bi_destructor = bio_destructor_drbd;
	return b;
}
#ifdef __CHECKER__ #ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will /* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real functions sparse works. give tons of false positives. When this is a real functions sparse works.
...@@ -3263,6 +3283,8 @@ static void drbd_destroy_mempools(void) ...@@ -3263,6 +3283,8 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
if (drbd_md_io_bio_set)
bioset_free(drbd_md_io_bio_set);
if (drbd_md_io_page_pool) if (drbd_md_io_page_pool)
mempool_destroy(drbd_md_io_page_pool); mempool_destroy(drbd_md_io_page_pool);
if (drbd_ee_mempool) if (drbd_ee_mempool)
...@@ -3278,6 +3300,7 @@ static void drbd_destroy_mempools(void) ...@@ -3278,6 +3300,7 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache) if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache); kmem_cache_destroy(drbd_al_ext_cache);
drbd_md_io_bio_set = NULL;
drbd_md_io_page_pool = NULL; drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL; drbd_ee_mempool = NULL;
drbd_request_mempool = NULL; drbd_request_mempool = NULL;
...@@ -3303,6 +3326,7 @@ static int drbd_create_mempools(void) ...@@ -3303,6 +3326,7 @@ static int drbd_create_mempools(void)
drbd_al_ext_cache = NULL; drbd_al_ext_cache = NULL;
drbd_pp_pool = NULL; drbd_pp_pool = NULL;
drbd_md_io_page_pool = NULL; drbd_md_io_page_pool = NULL;
drbd_md_io_bio_set = NULL;
/* caches */ /* caches */
drbd_request_cache = kmem_cache_create( drbd_request_cache = kmem_cache_create(
...@@ -3326,6 +3350,12 @@ static int drbd_create_mempools(void) ...@@ -3326,6 +3350,12 @@ static int drbd_create_mempools(void)
goto Enomem; goto Enomem;
/* mempools */ /* mempools */
#ifdef COMPAT_HAVE_BIOSET_CREATE
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
#endif
drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0); drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
if (drbd_md_io_page_pool == NULL) if (drbd_md_io_page_pool == NULL)
goto Enomem; goto Enomem;
......
...@@ -1106,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, ...@@ -1106,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
/* In most cases, we will only need one bio. But in case the lower /* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this * level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the * side than those of the sending peer, we may need to submit the
* request in more than one bio. */ * request in more than one bio.
*
* Plain bio_alloc is good enough here, this is no DRBD internally
* generated bio, but a bio allocated on behalf of the peer.
*/
next_bio: next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages); bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) { if (!bio) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment