Commit c93104e7 authored by Christoph Hellwig, committed by David Sterba

btrfs: split btrfs_submit_data_bio to read and write parts

Split btrfs_submit_data_bio into one helper for reads and one for writes.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e6484bd4
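For orientation before the hunks: after this patch, submit_one_bio() in extent_io.c sends metadata bios to btrfs_submit_metadata_bio(), data writes to the new btrfs_submit_data_write_bio(), and data reads to the new btrfs_submit_data_read_bio(). Below is a minimal, hedged sketch of that dispatch as a standalone C program; the struct fields and stub helpers are hypothetical stand-ins for the kernel APIs, and the real logic is in the extent_io.c hunk further down.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's bio and submission helpers. */
struct bio {
        bool is_data;   /* data inode vs. metadata (btree) inode */
        bool is_write;  /* stands in for btrfs_op(bio) == BTRFS_MAP_WRITE */
};

static void submit_metadata_bio(struct bio *bio)   { (void)bio; puts("metadata path"); }
static void submit_data_write_bio(struct bio *bio) { (void)bio; puts("data write path"); }
static void submit_data_read_bio(struct bio *bio)  { (void)bio; puts("data read path"); }

/* Mirrors the post-patch dispatch order in submit_one_bio(). */
static void submit_one_bio(struct bio *bio)
{
        if (!bio->is_data)
                submit_metadata_bio(bio);
        else if (bio->is_write)
                submit_data_write_bio(bio);
        else
                submit_data_read_bio(bio);
}

int main(void)
{
        struct bio read_bio = { .is_data = true, .is_write = false };

        submit_one_bio(&read_bio); /* prints "data read path" */
        return 0;
}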
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3259,8 +3259,9 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
 u64 btrfs_file_extent_end(const struct btrfs_path *path);
 /* inode.c */
-void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
-                           int mirror_num, enum btrfs_compression_type compress_type);
+void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
+void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
+                        int mirror_num, enum btrfs_compression_type compress_type);
 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
                             u32 pgoff, u8 *csum, const u8 * const csum_expected);
 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -182,17 +182,20 @@ static void submit_one_bio(struct bio *bio, int mirror_num,
                            enum btrfs_compression_type compress_type)
 {
         struct extent_io_tree *tree = bio->bi_private;
+        struct inode *inode = tree->private_data;
         bio->bi_private = NULL;
         /* Caller should ensure the bio has at least some range added */
         ASSERT(bio->bi_iter.bi_size);
-        if (is_data_inode(tree->private_data))
-                btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
-                                      compress_type);
+        if (!is_data_inode(inode))
+                btrfs_submit_metadata_bio(inode, bio, mirror_num);
+        else if (btrfs_op(bio) == BTRFS_MAP_WRITE)
+                btrfs_submit_data_write_bio(inode, bio, mirror_num);
         else
-                btrfs_submit_metadata_bio(tree->private_data, bio, mirror_num);
+                btrfs_submit_data_read_bio(inode, bio, mirror_num, compress_type);
         /*
          * Above submission hooks will handle the error by ending the bio,
          * which will do the cleanup properly. So here we should not return
@@ -2786,7 +2789,7 @@ static void submit_data_read_repair(struct inode *inode, struct bio *failed_bio,
                 ret = btrfs_repair_one_sector(inode, failed_bio,
                                 bio_offset + offset,
                                 page, pgoff + offset, start + offset,
-                                failed_mirror, btrfs_submit_data_bio);
+                                failed_mirror, btrfs_submit_data_read_bio);
                 if (!ret) {
                         /*
                          * We have submitted the read repair, the page release
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2580,90 +2580,80 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
         return errno_to_blk_status(ret);
 }
-/*
- * extent_io.c submission hook. This does the right thing for csum calculation
- * on write, or reading the csums from the tree before a read.
- *
- * Rules about async/sync submit,
- * a) read: sync submit
- *
- * b) write without checksum: sync submit
- *
- * c) write with checksum:
- *    c-1) if bio is issued by fsync: sync submit
- *         (sync_writers != 0)
- *
- *    c-2) if root is reloc root: sync submit
- *         (only in case of buffered IO)
- *
- *    c-3) otherwise: async submit
- */
-void btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
-                           int mirror_num, enum btrfs_compression_type compress_type)
+void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num)
 {
         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
-        blk_status_t ret = 0;
-        int skip_sum;
-        int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
-        skip_sum = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
-                test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
-        if (btrfs_is_free_space_inode(BTRFS_I(inode)))
-                metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
+        struct btrfs_inode *bi = BTRFS_I(inode);
+        blk_status_t ret;
         if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-                struct page *page = bio_first_bvec_all(bio)->bv_page;
-                loff_t file_offset = page_offset(page);
-                ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
+                ret = extract_ordered_extent(bi, bio,
+                                page_offset(bio_first_bvec_all(bio)->bv_page));
                 if (ret)
                         goto out;
         }
-        if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
-                ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
-                if (ret)
-                        goto out;
-                if (compress_type != BTRFS_COMPRESS_NONE) {
-                        /*
-                         * btrfs_submit_compressed_read will handle completing
-                         * the bio if there were any errors, so just return
-                         * here.
-                         */
-                        btrfs_submit_compressed_read(inode, bio, mirror_num);
-                        return;
-                } else {
-                        /*
-                         * Lookup bio sums does extra checks around whether we
-                         * need to csum or not, which is why we ignore skip_sum
-                         * here.
-                         */
-                        ret = btrfs_lookup_bio_sums(inode, bio, NULL);
+        /*
+         * Rules for async/sync submit:
+         * a) write without checksum: sync submit
+         * b) write with checksum:
+         *    b-1) if bio is issued by fsync: sync submit
+         *         (sync_writers != 0)
+         *    b-2) if root is reloc root: sync submit
+         *         (only in case of buffered IO)
+         *    b-3) otherwise: async submit
+         */
+        if (!(bi->flags & BTRFS_INODE_NODATASUM) &&
+            !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
+                if (atomic_read(&bi->sync_writers)) {
+                        ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
                         if (ret)
                                 goto out;
+                } else if (btrfs_is_data_reloc_root(bi->root)) {
+                        ; /* Csum items have already been cloned */
+                } else {
+                        ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
+                                                  btrfs_submit_bio_start);
+                        goto out;
                 }
-                goto mapit;
-        } else if (async && !skip_sum) {
-                /* csum items have already been cloned */
-                if (btrfs_is_data_reloc_root(root))
-                        goto mapit;
-                /* we're doing a write, do the async checksumming */
-                ret = btrfs_wq_submit_bio(inode, bio, mirror_num,
-                                          0, btrfs_submit_bio_start);
+        }
+        ret = btrfs_map_bio(fs_info, bio, mirror_num);
+out:
+        if (ret) {
+                bio->bi_status = ret;
+                bio_endio(bio);
+        }
+}
+void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
+                        int mirror_num, enum btrfs_compression_type compress_type)
+{
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+        blk_status_t ret;
+        ret = btrfs_bio_wq_end_io(fs_info, bio,
+                        btrfs_is_free_space_inode(BTRFS_I(inode)) ?
+                        BTRFS_WQ_ENDIO_FREE_SPACE : BTRFS_WQ_ENDIO_DATA);
+        if (ret)
                 goto out;
-        } else if (!skip_sum) {
-                ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, (u64)-1, false);
-                if (ret)
-                        goto out;
+        if (compress_type != BTRFS_COMPRESS_NONE) {
+                /*
+                 * btrfs_submit_compressed_read will handle completing the bio
+                 * if there were any errors, so just return here.
+                 */
+                btrfs_submit_compressed_read(inode, bio, mirror_num);
+                return;
         }
-mapit:
+        /*
+         * Lookup bio sums does extra checks around whether we need to csum or
+         * not, which is why we ignore skip_sum here.
+         */
+        ret = btrfs_lookup_bio_sums(inode, bio, NULL);
+        if (ret)
+                goto out;
         ret = btrfs_map_bio(fs_info, bio, mirror_num);
 out:
         if (ret) {
                 bio->bi_status = ret;
@@ -7993,7 +7983,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
                 goto map;
         if (write) {
-                /* Check btrfs_submit_data_bio() for async submit rules */
+                /* Check btrfs_submit_data_write_bio() for async submit rules */
                 if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers))
                         return btrfs_wq_submit_bio(inode, bio, 0, file_offset,
                                         btrfs_submit_bio_start_direct_io);