Commit 30493ff4 authored by Qu Wenruo, committed by David Sterba

btrfs: remove stripe boundary calculation for compressed I/O

Stop looking at the stripe boundary in alloc_compressed_bio() now that
btrfs_submit_bio can split bios, open code the now trivial code from
alloc_compressed_bio() in btrfs_submit_compressed_read and stop
maintaining the pending_ios count for reads as there is always just
a single bio now.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
[hch: remove more cruft in btrfs_submit_compressed_read,
      use btrfs_zoned_get_device in alloc_compressed_bio]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2380220e
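
For orientation before the diff: with btrfs_submit_bio() now splitting bios at RAID stripe boundaries internally, the only per-chunk limits left in the compressed read and write loops are the page boundary and the remaining compressed length. The snippet below is a minimal userspace sketch of that clamping, not kernel code; the helper name compressed_chunk_size() and the fixed 4 KiB PAGE_SIZE are illustrative assumptions.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u	/* illustrative; the kernel uses the arch page size */

/*
 * Hypothetical helper: how many bytes the caller may add to the current
 * compressed bio for the chunk starting at 'offset' bytes into the
 * compressed extent. Only the page boundary and the remaining compressed
 * length are considered; the stripe boundary is no longer the caller's
 * problem because btrfs_submit_bio() splits bios there itself.
 */
static uint32_t compressed_chunk_size(uint64_t offset, uint64_t compressed_len)
{
	uint64_t real_size = PAGE_SIZE - (offset % PAGE_SIZE);

	if (real_size > compressed_len - offset)
		real_size = compressed_len - offset;
	return (uint32_t)real_size;
}

int main(void)
{
	/* A 12 KiB compressed extent: full pages first, then the tail. */
	assert(compressed_chunk_size(0, 12288) == 4096);
	assert(compressed_chunk_size(8192, 12288) == 4096);
	return 0;
}

In the kernel the same two min_t() clamps remain in both btrfs_submit_compressed_write() and btrfs_submit_compressed_read() after this patch, as the diff below shows.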
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -141,12 +141,15 @@ static int compression_decompress(int type, struct list_head *ws,
 
 static int btrfs_decompress_bio(struct compressed_bio *cb);
 
-static void finish_compressed_bio_read(struct compressed_bio *cb)
+static void end_compressed_bio_read(struct btrfs_bio *bbio)
 {
+	struct compressed_bio *cb = bbio->private;
 	unsigned int index;
 	struct page *page;
 
-	if (cb->status == BLK_STS_OK)
+	if (bbio->bio.bi_status)
+		cb->status = bbio->bio.bi_status;
+	else
 		cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));
 
 	/* Release the compressed pages */
@@ -162,17 +165,6 @@ static void finish_compressed_bio_read(struct compressed_bio *cb)
 	/* Finally free the cb struct */
 	kfree(cb->compressed_pages);
 	kfree(cb);
-}
-
-static void end_compressed_bio_read(struct btrfs_bio *bbio)
-{
-	struct compressed_bio *cb = bbio->private;
-
-	if (bbio->bio.bi_status)
-		cb->status = bbio->bio.bi_status;
-
-	if (refcount_dec_and_test(&cb->pending_ios))
-		finish_compressed_bio_read(cb);
 
 	bio_put(&bbio->bio);
 }
@@ -289,43 +281,30 @@ static void end_compressed_bio_write(struct btrfs_bio *bbio)
  *                  from or written to.
  * @endio_func:     The endio function to call after the IO for compressed data
  *                  is finished.
- * @next_stripe_start: Return value of logical bytenr of where next stripe starts.
- *                  Let the caller know to only fill the bio up to the stripe
- *                  boundary.
  */
 static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
 					blk_opf_t opf,
-					btrfs_bio_end_io_t endio_func,
-					u64 *next_stripe_start)
+					btrfs_bio_end_io_t endio_func)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
-	struct btrfs_io_geometry geom;
-	struct extent_map *em;
 	struct bio *bio;
-	int ret;
 
 	bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, BTRFS_I(cb->inode), endio_func,
 			      cb);
 	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 
-	em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
-	if (IS_ERR(em)) {
-		bio_put(bio);
-		return ERR_CAST(em);
-	}
+	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
+		struct btrfs_device *device;
 
-	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
-		bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);
+		device = btrfs_zoned_get_device(fs_info, disk_bytenr,
+						fs_info->sectorsize);
+		if (IS_ERR(device)) {
+			bio_put(bio);
+			return ERR_CAST(device);
+		}
 
-	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
-	free_extent_map(em);
-	if (ret < 0) {
-		bio_put(bio);
-		return ERR_PTR(ret);
+		bio_set_dev(bio, device->bdev);
 	}
-	*next_stripe_start = disk_bytenr + geom.len;
 
 	refcount_inc(&cb->pending_ios);
 	return bio;
 }
@@ -352,7 +331,6 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	struct bio *bio = NULL;
 	struct compressed_bio *cb;
 	u64 cur_disk_bytenr = disk_start;
-	u64 next_stripe_start;
 	blk_status_t ret = BLK_STS_OK;
 	const bool use_append = btrfs_use_zone_append(inode, disk_start);
 	const enum req_op bio_op = REQ_BTRFS_ONE_ORDERED |
@@ -388,8 +366,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 		/* Allocate new bio if submitted or not yet allocated */
 		if (!bio) {
 			bio = alloc_compressed_bio(cb, cur_disk_bytenr,
-					bio_op | write_flags, end_compressed_bio_write,
-					&next_stripe_start);
+					bio_op | write_flags, end_compressed_bio_write);
 			if (IS_ERR(bio)) {
 				ret = errno_to_blk_status(PTR_ERR(bio));
 				break;
@@ -398,20 +375,12 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 			if (blkcg_css)
 				bio->bi_opf |= REQ_CGROUP_PUNT;
 		}
 
-		/*
-		 * We should never reach next_stripe_start start as we will
-		 * submit comp_bio when reach the boundary immediately.
-		 */
-		ASSERT(cur_disk_bytenr != next_stripe_start);
-
 		/*
 		 * We have various limits on the real read size:
-		 * - stripe boundary
 		 * - page boundary
 		 * - compressed length boundary
 		 */
-		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
-		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
+		real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset));
 		real_size = min_t(u64, real_size, compressed_len - offset);
 		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
@@ -426,9 +395,6 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 			submit = true;
 
 		cur_disk_bytenr += added;
-		/* Reached stripe boundary */
-		if (cur_disk_bytenr == next_stripe_start)
-			submit = true;
 
 		/* Finished the range */
 		if (cur_disk_bytenr == disk_start + compressed_len)
@@ -623,10 +589,9 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
 	unsigned int compressed_len;
-	struct bio *comp_bio = NULL;
+	struct bio *comp_bio;
 	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 cur_disk_byte = disk_bytenr;
-	u64 next_stripe_start;
 	u64 file_offset;
 	u64 em_len;
 	u64 em_start;
@@ -693,37 +658,23 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	/* include any pages we added in add_ra-bio_pages */
 	cb->len = bio->bi_iter.bi_size;
 
+	comp_bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, BTRFS_I(cb->inode),
+				   end_compressed_bio_read, cb);
+	comp_bio->bi_iter.bi_sector = (cur_disk_byte >> SECTOR_SHIFT);
+
 	while (cur_disk_byte < disk_bytenr + compressed_len) {
 		u64 offset = cur_disk_byte - disk_bytenr;
 		unsigned int index = offset >> PAGE_SHIFT;
 		unsigned int real_size;
 		unsigned int added;
		struct page *page = cb->compressed_pages[index];
-		bool submit = false;
 
-		/* Allocate new bio if submitted or not yet allocated */
-		if (!comp_bio) {
-			comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
-					REQ_OP_READ, end_compressed_bio_read,
-					&next_stripe_start);
-			if (IS_ERR(comp_bio)) {
-				cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
-				break;
-			}
-		}
-		/*
-		 * We should never reach next_stripe_start start as we will
-		 * submit comp_bio when reach the boundary immediately.
-		 */
-		ASSERT(cur_disk_byte != next_stripe_start);
-
 		/*
 		 * We have various limit on the real read size:
-		 * - stripe boundary
 		 * - page boundary
 		 * - compressed length boundary
 		 */
-		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
-		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
+		real_size = min_t(u64, U32_MAX, PAGE_SIZE - offset_in_page(offset));
 		real_size = min_t(u64, real_size, compressed_len - offset);
 		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
@@ -734,35 +685,20 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		 */
 		ASSERT(added == real_size);
 		cur_disk_byte += added;
-
-		/* Reached stripe boundary, need to submit */
-		if (cur_disk_byte == next_stripe_start)
-			submit = true;
-
-		/* Has finished the range, need to submit */
-		if (cur_disk_byte == disk_bytenr + compressed_len)
-			submit = true;
-
-		if (submit) {
-			/*
-			 * Save the initial offset of this chunk, as there
-			 * is no direct correlation between compressed pages and
-			 * the original file offset. The field is only used for
-			 * printing error messages.
-			 */
-			btrfs_bio(comp_bio)->file_offset = file_offset;
-
-			ASSERT(comp_bio->bi_iter.bi_size);
-			btrfs_submit_bio(fs_info, comp_bio, mirror_num);
-			comp_bio = NULL;
-		}
-	}
-
-	if (memstall)
-		psi_memstall_leave(&pflags);
-
-	if (refcount_dec_and_test(&cb->pending_ios))
-		finish_compressed_bio_read(cb);
+	}
+
+	if (memstall)
+		psi_memstall_leave(&pflags);
+
+	/*
+	 * Stash the initial offset of this chunk, as there is no direct
+	 * correlation between compressed pages and the original file offset.
+	 * The field is only used for printing error messages anyway.
+	 */
+	btrfs_bio(comp_bio)->file_offset = file_offset;
+
+	ASSERT(comp_bio->bi_iter.bi_size);
+	btrfs_submit_bio(fs_info, comp_bio, mirror_num);
 	return;
 
 fail: