Commit 57906d58 authored by Christoph Hellwig, committed by David Sterba

btrfs: factor check and flush helpers from __btrfsic_submit_bio

Split out two helpers to make __btrfsic_submit_bio more readable.
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 3687fcb0
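
Editorial note: the refactor below follows a common shape, where the submit path only classifies the bio and hands it to one of two single-purpose helpers. As an illustration only, here is a minimal standalone C sketch of that dispatch shape; the types and names (mock_bio, check_write, check_flush, PREFLUSH) are hypothetical stand-ins, not the kernel API touched by this commit.

/*
 * Illustrative sketch only: a tiny userspace model of the dispatch shape
 * used by __btrfsic_submit_bio after this refactor. All names here are
 * hypothetical; this is not kernel code.
 */
#include <stdio.h>

enum mock_op { OP_READ, OP_WRITE };

#define PREFLUSH (1u << 0)

struct mock_bio {
        enum mock_op op;
        unsigned int flags;      /* e.g. PREFLUSH */
        unsigned int nr_segs;    /* non-zero means the bio carries data */
};

static void check_write(struct mock_bio *bio)
{
        printf("checking write bio with %u segment(s)\n", bio->nr_segs);
}

static void check_flush(struct mock_bio *bio)
{
        printf("checking flush bio (flags=0x%x)\n", bio->flags);
}

/* The dispatcher stays short: classify the bio, then hand it to a helper. */
static void submit_checked(struct mock_bio *bio)
{
        if (bio->op == OP_WRITE && bio->nr_segs > 0)
                check_write(bio);
        else if (bio->flags & PREFLUSH)
                check_flush(bio);
        /* anything else passes through unchecked */
}

int main(void)
{
        struct mock_bio data_write = { .op = OP_WRITE, .nr_segs = 4 };
        struct mock_bio flush = { .op = OP_WRITE, .flags = PREFLUSH };

        submit_checked(&data_write);
        submit_checked(&flush);
        return 0;
}
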
@@ -2633,6 +2633,73 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev)
                 &btrfsic_dev_state_hashtable);
 }
 
+static void btrfsic_check_write_bio(struct bio *bio, struct btrfsic_dev_state *dev_state)
+{
+        unsigned int segs = bio_segments(bio);
+        u64 dev_bytenr = 512 * bio->bi_iter.bi_sector;
+        u64 cur_bytenr = dev_bytenr;
+        struct bvec_iter iter;
+        struct bio_vec bvec;
+        char **mapped_datav;
+        int bio_is_patched = 0;
+        int i = 0;
+
+        if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                pr_info(
+"submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
+                        bio_op(bio), bio->bi_opf, segs,
+                        bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
+
+        mapped_datav = kmalloc_array(segs, sizeof(*mapped_datav), GFP_NOFS);
+        if (!mapped_datav)
+                return;
+
+        bio_for_each_segment(bvec, bio, iter) {
+                BUG_ON(bvec.bv_len != PAGE_SIZE);
+                mapped_datav[i] = page_address(bvec.bv_page);
+                i++;
+
+                if (dev_state->state->print_mask &
+                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
+                        pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
+                                i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
+                cur_bytenr += bvec.bv_len;
+        }
+
+        btrfsic_process_written_block(dev_state, dev_bytenr, mapped_datav, segs,
+                                      bio, &bio_is_patched, bio->bi_opf);
+        kfree(mapped_datav);
+}
+
+static void btrfsic_check_flush_bio(struct bio *bio, struct btrfsic_dev_state *dev_state)
+{
+        if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+                        bio_op(bio), bio->bi_opf, bio->bi_bdev);
+
+        if (dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
+                struct btrfsic_block *const block =
+                        &dev_state->dummy_block_for_bio_bh_flush;
+
+                block->is_iodone = 0;
+                block->never_written = 0;
+                block->iodone_w_error = 0;
+                block->flush_gen = dev_state->last_flush_gen + 1;
+                block->submit_bio_bh_rw = bio->bi_opf;
+                block->orig_bio_private = bio->bi_private;
+                block->orig_bio_end_io = bio->bi_end_io;
+                block->next_in_same_bio = NULL;
+
+                bio->bi_private = block;
+                bio->bi_end_io = btrfsic_bio_end_io;
+        } else if ((dev_state->state->print_mask &
+                   (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                    BTRFSIC_PRINT_MASK_VERBOSE))) {
+                pr_info(
+"btrfsic_submit_bio(%pg) with FLUSH but dummy block already in use (ignored)!\n",
+                        dev_state->bdev);
+        }
+}
+
 static void __btrfsic_submit_bio(struct bio *bio)
 {
         struct btrfsic_dev_state *dev_state;
 
@@ -2640,80 +2707,18 @@ static void __btrfsic_submit_bio(struct bio *bio)
         if (!btrfsic_is_initialized)
                 return;
 
-        mutex_lock(&btrfsic_mutex);
-        /* since btrfsic_submit_bio() is also called before
-         * btrfsic_mount(), this might return NULL */
+        /*
+         * We can be called before btrfsic_mount, so there might not be a
+         * dev_state.
+         */
         dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
-        if (NULL != dev_state &&
-            (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
-                int i = 0;
-                u64 dev_bytenr;
-                u64 cur_bytenr;
-                struct bio_vec bvec;
-                struct bvec_iter iter;
-                int bio_is_patched;
-                char **mapped_datav;
-                unsigned int segs = bio_segments(bio);
-
-                dev_bytenr = 512 * bio->bi_iter.bi_sector;
-                bio_is_patched = 0;
-
-                if (dev_state->state->print_mask &
-                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-                        pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-                               bio_op(bio), bio->bi_opf, segs,
-                               bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
-
-                mapped_datav = kmalloc_array(segs,
-                                             sizeof(*mapped_datav), GFP_NOFS);
-                if (!mapped_datav)
-                        goto leave;
-                cur_bytenr = dev_bytenr;
-
-                bio_for_each_segment(bvec, bio, iter) {
-                        BUG_ON(bvec.bv_len != PAGE_SIZE);
-                        mapped_datav[i] = page_address(bvec.bv_page);
-                        i++;
-
-                        if (dev_state->state->print_mask &
-                            BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
-                                pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
-                                       i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
-                        cur_bytenr += bvec.bv_len;
-                }
-                btrfsic_process_written_block(dev_state, dev_bytenr,
-                                              mapped_datav, segs,
-                                              bio, &bio_is_patched,
-                                              bio->bi_opf);
-                kfree(mapped_datav);
-        } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
-                if (dev_state->state->print_mask &
-                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
-                        pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
-                               bio_op(bio), bio->bi_opf, bio->bi_bdev);
-                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
-                        if ((dev_state->state->print_mask &
-                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
-                              BTRFSIC_PRINT_MASK_VERBOSE)))
-                                pr_info(
-"btrfsic_submit_bio(%pg) with FLUSH but dummy block already in use (ignored)!\n",
-                                       dev_state->bdev);
-                } else {
-                        struct btrfsic_block *const block =
-                                &dev_state->dummy_block_for_bio_bh_flush;
-
-                        block->is_iodone = 0;
-                        block->never_written = 0;
-                        block->iodone_w_error = 0;
-                        block->flush_gen = dev_state->last_flush_gen + 1;
-                        block->submit_bio_bh_rw = bio->bi_opf;
-                        block->orig_bio_private = bio->bi_private;
-                        block->orig_bio_end_io = bio->bi_end_io;
-                        block->next_in_same_bio = NULL;
-                        bio->bi_private = block;
-                        bio->bi_end_io = btrfsic_bio_end_io;
-                }
+        mutex_lock(&btrfsic_mutex);
+        if (dev_state) {
+                if (bio_op(bio) == REQ_OP_WRITE && bio_has_data(bio))
+                        btrfsic_check_write_bio(bio, dev_state);
+                else if (bio->bi_opf & REQ_PREFLUSH)
+                        btrfsic_check_flush_bio(bio, dev_state);
         }
-leave:
         mutex_unlock(&btrfsic_mutex);
 }
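
Editorial note: one detail worth calling out in btrfsic_check_flush_bio above is the completion hook, where the original bi_private and bi_end_io are saved in the dummy block and replaced with btrfsic_bio_end_io so the checker observes the flush completing before the original owner is notified. As an illustration only, the standalone sketch below models that save/substitute/restore pattern with hypothetical userspace types; it is not kernel code.

/*
 * Illustrative sketch only: a userspace model of the completion-hook
 * pattern (save the original callback and context, substitute our own,
 * then restore and chain on completion). All names are hypothetical.
 */
#include <stdio.h>

struct mock_bio;
typedef void (*end_io_fn)(struct mock_bio *bio);

struct mock_bio {
        end_io_fn end_io;   /* completion callback, like bi_end_io */
        void *private;      /* caller context, like bi_private */
};

struct tracker_block {
        int is_iodone;
        end_io_fn orig_end_io;  /* saved original callback */
        void *orig_private;     /* saved original context */
};

/* Our interposed completion handler: do bookkeeping, then chain. */
static void tracker_end_io(struct mock_bio *bio)
{
        struct tracker_block *block = bio->private;

        block->is_iodone = 1;
        printf("tracker: flush completed\n");

        /* restore the original fields and invoke the original handler */
        bio->private = block->orig_private;
        bio->end_io = block->orig_end_io;
        if (bio->end_io)
                bio->end_io(bio);
}

/* Hook a bio before submission, mirroring the shape of the kernel helper. */
static void hook_flush_bio(struct mock_bio *bio, struct tracker_block *block)
{
        block->is_iodone = 0;
        block->orig_private = bio->private;
        block->orig_end_io = bio->end_io;

        bio->private = block;
        bio->end_io = tracker_end_io;
}

static void caller_end_io(struct mock_bio *bio)
{
        printf("caller: notified, private=%p\n", bio->private);
}

int main(void)
{
        struct mock_bio bio = { .end_io = caller_end_io, .private = NULL };
        struct tracker_block block;

        hook_flush_bio(&bio, &block);
        bio.end_io(&bio);   /* simulate device completion */
        return 0;
}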