Commit fffb2739 authored by Jan Kara's avatar Jan Kara Committed by Theodore Ts'o

ext4: better estimate credits needed for ext4_da_writepages()

We limit the number of blocks written in a single loop of
ext4_da_writepages() to 64 when inode uses indirect blocks.  That is
unnecessary as credit estimates for mapping logically contiguous run
of blocks is rather low even for inode with indirect blocks.  So just
lift this limitation and properly calculate the number of necessary
credits.

This better credit estimate will also later allow us to always write
at least a single page in one iteration.
Reviewed-by: Zheng Liu <wenqing.lz@taobao.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent fa55a0ed
...@@ -2596,8 +2596,7 @@ struct ext4_extent; ...@@ -2596,8 +2596,7 @@ struct ext4_extent;
extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
extern int ext4_ext_writepage_trans_blocks(struct inode *, int); extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
int chunk);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags); struct ext4_map_blocks *map, int flags);
extern void ext4_ext_truncate(handle_t *, struct inode *); extern void ext4_ext_truncate(handle_t *, struct inode *);
......
...@@ -2328,17 +2328,15 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, ...@@ -2328,17 +2328,15 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
} }
/* /*
* How many index/leaf blocks need to change/allocate to modify nrblocks? * How many index/leaf blocks need to change/allocate to add @extents extents?
* *
* if nrblocks are fit in a single extent (chunk flag is 1), then * If we add a single extent, then in the worse case, each tree level
* in the worse case, each tree level index/leaf need to be changed * index/leaf need to be changed in case of the tree split.
* if the tree split due to insert a new extent, then the old tree
* index/leaf need to be updated too
* *
* If the nrblocks are discontiguous, they could cause * If more extents are inserted, they could cause the whole tree split more
* the whole tree split more than once, but this is really rare. * than once, but this is really rare.
*/ */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{ {
int index; int index;
int depth; int depth;
...@@ -2349,7 +2347,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) ...@@ -2349,7 +2347,7 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
depth = ext_depth(inode); depth = ext_depth(inode);
if (chunk) if (extents <= 1)
index = depth * 2; index = depth * 2;
else else
index = depth * 3; index = depth * 3;
......
...@@ -136,6 +136,8 @@ static void ext4_invalidatepage(struct page *page, unsigned int offset, ...@@ -136,6 +136,8 @@ static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length); unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents);
/* /*
* Test whether an inode is a fast symlink. * Test whether an inode is a fast symlink.
...@@ -2203,28 +2205,25 @@ static int ext4_writepage(struct page *page, ...@@ -2203,28 +2205,25 @@ static int ext4_writepage(struct page *page,
} }
/* /*
* This is called via ext4_da_writepages() to * mballoc gives us at most this number of blocks...
* calculate the total number of credits to reserve to fit * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
* a single extent allocation into a single transaction, * The rest of mballoc seems to handle chunks upto full group size.
* ext4_da_writpeages() will loop calling this before
* the block allocation.
*/ */
#define MAX_WRITEPAGES_EXTENT_LEN 2048
/*
* Calculate the total number of credits to reserve for one writepages
* iteration. This is called from ext4_da_writepages(). We map an extent of
* upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
* the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
* bpp - 1 blocks in bpp different extents.
*/
static int ext4_da_writepages_trans_blocks(struct inode *inode) static int ext4_da_writepages_trans_blocks(struct inode *inode)
{ {
int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; int bpp = ext4_journal_blocks_per_page(inode);
/*
* With non-extent format the journal credit needed to
* insert nrblocks contiguous block is dependent on
* number of contiguous block. So we will limit
* number of contiguous block to a sane value
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
(max_blocks > EXT4_MAX_TRANS_DATA))
max_blocks = EXT4_MAX_TRANS_DATA;
return ext4_chunk_trans_blocks(inode, max_blocks); return ext4_meta_trans_blocks(inode,
MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
} }
/* /*
...@@ -4650,11 +4649,12 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, ...@@ -4650,11 +4649,12 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0; return 0;
} }
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{ {
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return ext4_ind_trans_blocks(inode, nrblocks); return ext4_ind_trans_blocks(inode, lblocks);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); return ext4_ext_index_trans_blocks(inode, pextents);
} }
/* /*
...@@ -4668,7 +4668,8 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) ...@@ -4668,7 +4668,8 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
* *
* Also account for superblock, inode, quota and xattr blocks * Also account for superblock, inode, quota and xattr blocks
*/ */
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents)
{ {
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks; int gdpblocks;
...@@ -4676,14 +4677,10 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) ...@@ -4676,14 +4677,10 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
int ret = 0; int ret = 0;
/* /*
* How many index blocks need to touch to modify nrblocks? * How many index blocks need to touch to map @lblocks logical blocks
* The "Chunk" flag indicating whether the nrblocks is * to @pextents physical extents?
* physically contiguous on disk
*
* For Direct IO and fallocate, they calls get_block to allocate
* one single extent at a time, so they could set the "Chunk" flag
*/ */
idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
ret = idxblocks; ret = idxblocks;
...@@ -4691,12 +4688,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) ...@@ -4691,12 +4688,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
* Now let's see how many group bitmaps and group descriptors need * Now let's see how many group bitmaps and group descriptors need
* to account * to account
*/ */
groups = idxblocks; groups = idxblocks + pextents;
if (chunk)
groups += 1;
else
groups += nrblocks;
gdpblocks = groups; gdpblocks = groups;
if (groups > ngroups) if (groups > ngroups)
groups = ngroups; groups = ngroups;
...@@ -4727,7 +4719,7 @@ int ext4_writepage_trans_blocks(struct inode *inode) ...@@ -4727,7 +4719,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
int bpp = ext4_journal_blocks_per_page(inode); int bpp = ext4_journal_blocks_per_page(inode);
int ret; int ret;
ret = ext4_meta_trans_blocks(inode, bpp, 0); ret = ext4_meta_trans_blocks(inode, bpp, bpp);
/* Account for data blocks for journalled mode */ /* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode)) if (ext4_should_journal_data(inode))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment