Commit 2caffb6a authored by Kemeng Shi's avatar Kemeng Shi Committed by Theodore Ts'o

ext4: use correct criteria name instead of stale integer number in comment

Use the correct criteria name instead of the stale integer number in the comment.
Signed-off-by: default avatarKemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: default avatarOjaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: default avatarJan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20240424061904.987525-5-shikemeng@huaweicloud.com
Signed-off-by: default avatarTheodore Ts'o <tytso@mit.edu>
parent d1a3924e
...@@ -213,11 +213,14 @@ enum criteria { ...@@ -213,11 +213,14 @@ enum criteria {
#define EXT4_MB_USE_RESERVED 0x2000 #define EXT4_MB_USE_RESERVED 0x2000
/* Do strict check for free blocks while retrying block allocation */ /* Do strict check for free blocks while retrying block allocation */
#define EXT4_MB_STRICT_CHECK 0x4000 #define EXT4_MB_STRICT_CHECK 0x4000
/* Large fragment size list lookup succeeded at least once for cr = 0 */ /* Large fragment size list lookup succeeded at least once for
* CR_POWER2_ALIGNED */
#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000 #define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000
/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */ /* Avg fragment size rb tree lookup succeeded at least once for
* CR_GOAL_LEN_FAST */
#define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000 #define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000
/* Avg fragment size rb tree lookup succeeded at least once for cr = 1.5 */ /* Avg fragment size rb tree lookup succeeded at least once for
* CR_BEST_AVAIL_LEN */
#define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000 #define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000
struct ext4_allocation_request { struct ext4_allocation_request {
......
...@@ -1135,8 +1135,9 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, ...@@ -1135,8 +1135,9 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
ext4_mb_choose_next_group_best_avail(ac, new_cr, group); ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
} else { } else {
/* /*
* TODO: For CR=2, we can arrange groups in an rb tree sorted by * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
* bb_free. But until that happens, we should never come here. * rb tree sorted by bb_free. But until that happens, we should
* never come here.
*/ */
WARN_ON(1); WARN_ON(1);
} }
...@@ -2683,7 +2684,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, ...@@ -2683,7 +2684,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
int ret; int ret;
/* /*
* cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
* search to find large good chunks almost for free. If buddy * search to find large good chunks almost for free. If buddy
* data is not ready, then this optimization makes no sense. But * data is not ready, then this optimization makes no sense. But
* we never skip the first block group in a flex_bg, since this * we never skip the first block group in a flex_bg, since this
...@@ -3448,10 +3449,11 @@ static int ext4_mb_init_backend(struct super_block *sb) ...@@ -3448,10 +3449,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
} }
if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
sbi->s_mb_prefetch = ext4_get_groups_count(sb); sbi->s_mb_prefetch = ext4_get_groups_count(sb);
/* now many real IOs to prefetch within a single allocation at cr=0 /*
* given cr=0 is an CPU-related optimization we shouldn't try to * now many real IOs to prefetch within a single allocation at
* load too many groups, at some point we should start to use what * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related
* we've got in memory. * optimization we shouldn't try to load too many groups, at some point
* we should start to use what we've got in memory.
* with an average random access time 5ms, it'd take a second to get * with an average random access time 5ms, it'd take a second to get
* 200 groups (* N with flex_bg), so let's make this limit 4 * 200 groups (* N with flex_bg), so let's make this limit 4
*/ */
......
...@@ -187,8 +187,8 @@ struct ext4_allocation_context { ...@@ -187,8 +187,8 @@ struct ext4_allocation_context {
struct ext4_free_extent ac_f_ex; struct ext4_free_extent ac_f_ex;
/* /*
* goal len can change in CR1.5, so save the original len. This is * goal len can change in CR_BEST_AVAIL_LEN, so save the original len.
* used while adjusting the PA window and for accounting. * This is used while adjusting the PA window and for accounting.
*/ */
ext4_grpblk_t ac_orig_goal_len; ext4_grpblk_t ac_orig_goal_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment