Commit bcf43499 authored by Ojaswin Mujoo, committed by Theodore Ts'o

ext4: Refactor code in ext4_mb_normalize_request() and ext4_mb_use_preallocated()

Rename the preallocation iterator pa to tmp_pa in ext4_mb_normalize_request()
and ext4_mb_use_preallocated(), and cache each PA's logical start and end in
tmp_pa_start/tmp_pa_end so the overlap checks are easier to read.

There are no functional changes in this patch.
Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/8edcab489c06cf861b19d87207d9b0ff7ac7f3c1.1679731817.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 82089725
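
The whole patch is one renaming-and-caching pattern applied to both functions: the loop variable pa becomes tmp_pa, and each PA's logical start and end are computed once into tmp_pa_start and tmp_pa_end instead of being recomputed inline at every comparison. Below is a minimal before/after sketch of that pattern, using a simplified stand-in struct rather than the real ext4_prealloc_space (the EXT4_C2B() cluster-to-block conversion is omitted here):

	/* Simplified stand-ins for the real ext4 types; illustration only. */
	typedef unsigned int lblk_t;

	struct prealloc_space {
		lblk_t pa_lstart;	/* logical start of the PA */
		lblk_t pa_len;		/* length, simplified to blocks */
	};

	/* Before: the PA end is recomputed in each expression. */
	static int overlaps_old(struct prealloc_space *pa, lblk_t start, lblk_t end)
	{
		return !(pa->pa_lstart >= end ||
			 pa->pa_lstart + pa->pa_len <= start);
	}

	/* After: start/end are cached once, so every check reads the same way. */
	static int overlaps_new(struct prealloc_space *tmp_pa, lblk_t start, lblk_t end)
	{
		lblk_t tmp_pa_start = tmp_pa->pa_lstart;
		lblk_t tmp_pa_end = tmp_pa->pa_lstart + tmp_pa->pa_len;

		return !(tmp_pa_start >= end || tmp_pa_end <= start);
	}
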
@@ -3994,7 +3994,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 	loff_t orig_size __maybe_unused;
 	ext4_lblk_t start;
 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
-	struct ext4_prealloc_space *pa;
+	struct ext4_prealloc_space *tmp_pa;
+	ext4_lblk_t tmp_pa_start, tmp_pa_end;
 
 	/* do normalize only data requests, metadata requests
 	   do not need preallocation */
@@ -4097,54 +4098,52 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
 	/* check we don't cross already preallocated blocks */
 	rcu_read_lock();
-	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-		ext4_lblk_t pa_end;
-
-		if (pa->pa_deleted)
+	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+		if (tmp_pa->pa_deleted)
 			continue;
-		spin_lock(&pa->pa_lock);
-		if (pa->pa_deleted) {
-			spin_unlock(&pa->pa_lock);
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted) {
+			spin_unlock(&tmp_pa->pa_lock);
 			continue;
 		}
 
-		pa_end = pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
+		tmp_pa_start = tmp_pa->pa_lstart;
+		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
 
 		/* PA must not overlap original request */
-		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
-			ac->ac_o_ex.fe_logical < pa->pa_lstart));
+		BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
+			ac->ac_o_ex.fe_logical < tmp_pa_start));
 
 		/* skip PAs this normalized request doesn't overlap with */
-		if (pa->pa_lstart >= end || pa_end <= start) {
-			spin_unlock(&pa->pa_lock);
+		if (tmp_pa_start >= end || tmp_pa_end <= start) {
+			spin_unlock(&tmp_pa->pa_lock);
 			continue;
 		}
-		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
+		BUG_ON(tmp_pa_start <= start && tmp_pa_end >= end);
 
 		/* adjust start or end to be adjacent to this pa */
-		if (pa_end <= ac->ac_o_ex.fe_logical) {
-			BUG_ON(pa_end < start);
-			start = pa_end;
-		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
-			BUG_ON(pa->pa_lstart > end);
-			end = pa->pa_lstart;
+		if (tmp_pa_end <= ac->ac_o_ex.fe_logical) {
+			BUG_ON(tmp_pa_end < start);
+			start = tmp_pa_end;
+		} else if (tmp_pa_start > ac->ac_o_ex.fe_logical) {
+			BUG_ON(tmp_pa_start > end);
+			end = tmp_pa_start;
 		}
-		spin_unlock(&pa->pa_lock);
+		spin_unlock(&tmp_pa->pa_lock);
 	}
 	rcu_read_unlock();
 	size = end - start;
 
 	/* XXX: extra loop to check we really don't overlap preallocations */
 	rcu_read_lock();
-	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-		ext4_lblk_t pa_end;
-
-		spin_lock(&pa->pa_lock);
-		if (pa->pa_deleted == 0) {
-			pa_end = pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
-			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
+	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted == 0) {
+			tmp_pa_start = tmp_pa->pa_lstart;
+			tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
 		}
-		spin_unlock(&pa->pa_lock);
+		spin_unlock(&tmp_pa->pa_lock);
 	}
 	rcu_read_unlock();
@@ -4359,7 +4358,8 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	int order, i;
 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
 	struct ext4_locality_group *lg;
-	struct ext4_prealloc_space *pa, *cpa = NULL;
+	struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
+	ext4_lblk_t tmp_pa_start, tmp_pa_end;
 	ext4_fsblk_t goal_block;
 
 	/* only data can be preallocated */
@@ -4368,18 +4368,20 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 
 	/* first, try per-file preallocation */
 	rcu_read_lock();
-	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
+	list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
 
 		/* all fields in this condition don't change,
 		 * so we can skip locking for them */
-		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
-		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
-					       EXT4_C2B(sbi, pa->pa_len)))
+		tmp_pa_start = tmp_pa->pa_lstart;
+		tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+
+		if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
+		    ac->ac_o_ex.fe_logical >= tmp_pa_end)
 			continue;
 
 		/* non-extent files can't have physical blocks past 2^32 */
 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
-		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
-		     EXT4_MAX_BLOCK_FILE_PHYS)) {
+		    (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
+		     EXT4_MAX_BLOCK_FILE_PHYS)) {
 			/*
 			 * Since PAs don't overlap, we won't find any
@@ -4389,16 +4391,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 		}
 
 		/* found preallocated blocks, use them */
-		spin_lock(&pa->pa_lock);
-		if (pa->pa_deleted == 0 && pa->pa_free) {
-			atomic_inc(&pa->pa_count);
-			ext4_mb_use_inode_pa(ac, pa);
-			spin_unlock(&pa->pa_lock);
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free) {
+			atomic_inc(&tmp_pa->pa_count);
+			ext4_mb_use_inode_pa(ac, tmp_pa);
+			spin_unlock(&tmp_pa->pa_lock);
 			ac->ac_criteria = 10;
 			rcu_read_unlock();
 			return true;
 		}
-		spin_unlock(&pa->pa_lock);
+		spin_unlock(&tmp_pa->pa_lock);
 	}
 	rcu_read_unlock();
@@ -4422,16 +4424,16 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	 */
 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
 		rcu_read_lock();
-		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
+		list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
 					pa_inode_list) {
-			spin_lock(&pa->pa_lock);
-			if (pa->pa_deleted == 0 &&
-			    pa->pa_free >= ac->ac_o_ex.fe_len) {
+			spin_lock(&tmp_pa->pa_lock);
+			if (tmp_pa->pa_deleted == 0 &&
+			    tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
 
 				cpa = ext4_mb_check_group_pa(goal_block,
-							pa, cpa);
+							tmp_pa, cpa);
 			}
-			spin_unlock(&pa->pa_lock);
+			spin_unlock(&tmp_pa->pa_lock);
 		}
 		rcu_read_unlock();
 	}
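
All of the loops above share one traversal idiom, which the rename leaves untouched: the list walk is protected by an RCU read-side critical section, an entry's fields are only trusted while its pa_lock is held, and pa_deleted is re-checked under the lock because the cheap unlocked check can race with deletion. A hedged, self-contained sketch of that idiom with generic names (the real lists and fields live in fs/ext4/mballoc.c):

	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	struct pa_entry {
		struct list_head	pa_inode_list;
		spinlock_t		pa_lock;
		int			pa_deleted;
	};

	/* Walk an RCU-protected PA list, inspecting each live entry under its lock. */
	static void walk_pa_list(struct list_head *head)
	{
		struct pa_entry *tmp_pa;

		rcu_read_lock();
		list_for_each_entry_rcu(tmp_pa, head, pa_inode_list) {
			/* Cheap unlocked check; may race, so it is repeated below. */
			if (tmp_pa->pa_deleted)
				continue;
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted) {
				/* Deleted between the two checks; skip it. */
				spin_unlock(&tmp_pa->pa_lock);
				continue;
			}
			/* ... tmp_pa's fields are stable here ... */
			spin_unlock(&tmp_pa->pa_lock);
		}
		rcu_read_unlock();
	}
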