Commit a60108f7 authored by Jaegeuk Kim

f2fs: use BLKS_PER_SEG, BLKS_PER_SEC, and SEGS_PER_SEC

No functional change.
Reviewed-by: Daeho Jeong <daehojeong@google.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 87161a2b
...@@ -900,7 +900,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, ...@@ -900,7 +900,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);
if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) { if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
le32_to_cpu(cp_block->cp_pack_total_block_count)); le32_to_cpu(cp_block->cp_pack_total_block_count));
goto invalid_cp; goto invalid_cp;
...@@ -1335,7 +1335,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1335,7 +1335,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (cpc->reason & CP_UMOUNT) { if (cpc->reason & CP_UMOUNT) {
if (le32_to_cpu(ckpt->cp_pack_total_block_count) + if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) { NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
f2fs_notice(sbi, "Disable nat_bits due to no space"); f2fs_notice(sbi, "Disable nat_bits due to no space");
} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) && } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
...@@ -1538,7 +1538,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1538,7 +1538,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_ver |= ((__u64)crc32 << 32); cp_ver |= ((__u64)crc32 << 32);
*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) for (i = 0; i < nm_i->nat_bits_blocks; i++)
f2fs_update_meta_page(sbi, nm_i->nat_bits + f2fs_update_meta_page(sbi, nm_i->nat_bits +
(i << F2FS_BLKSIZE_BITS), blk + i); (i << F2FS_BLKSIZE_BITS), blk + i);
...@@ -1741,9 +1741,9 @@ void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi) ...@@ -1741,9 +1741,9 @@ void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
im->ino_num = 0; im->ino_num = 0;
} }
sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
F2FS_ORPHANS_PER_BLOCK; F2FS_ORPHANS_PER_BLOCK;
} }
int __init f2fs_create_checkpoint_caches(void) int __init f2fs_create_checkpoint_caches(void)
......
...@@ -41,7 +41,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi) ...@@ -41,7 +41,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
total_vblocks = 0; total_vblocks = 0;
blks_per_sec = CAP_BLKS_PER_SEC(sbi); blks_per_sec = CAP_BLKS_PER_SEC(sbi);
hblks_per_sec = blks_per_sec / 2; hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
vblocks = get_valid_blocks(sbi, segno, true); vblocks = get_valid_blocks(sbi, segno, true);
dist = abs(vblocks - hblks_per_sec); dist = abs(vblocks - hblks_per_sec);
bimodal += dist * dist; bimodal += dist * dist;
...@@ -135,7 +135,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) ...@@ -135,7 +135,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->cur_ckpt_time = sbi->cprc_info.cur_time; si->cur_ckpt_time = sbi->cprc_info.cur_time;
si->peak_ckpt_time = sbi->cprc_info.peak_time; si->peak_ckpt_time = sbi->cprc_info.peak_time;
spin_unlock(&sbi->cprc_info.stat_lock); spin_unlock(&sbi->cprc_info.stat_lock);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; si->total_count = (int)sbi->user_block_count / BLKS_PER_SEG(sbi);
si->rsvd_segs = reserved_segments(sbi); si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi); si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi); si->valid_count = valid_user_blocks(sbi);
...@@ -208,7 +208,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) ...@@ -208,7 +208,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
if (!blks) if (!blks)
continue; continue;
if (blks == sbi->blocks_per_seg) if (blks == BLKS_PER_SEG(sbi))
si->full_seg[type]++; si->full_seg[type]++;
else else
si->dirty_seg[type]++; si->dirty_seg[type]++;
......
...@@ -1813,6 +1813,14 @@ struct f2fs_sb_info { ...@@ -1813,6 +1813,14 @@ struct f2fs_sb_info {
#endif #endif
}; };
/* Definitions to access f2fs_sb_info */
/* Number of blocks in one segment (cached from (sbi)->blocks_per_seg). */
#define BLKS_PER_SEG(sbi) \
((sbi)->blocks_per_seg)
/* Number of blocks in one section: segs_per_sec * blocks_per_seg,
 * computed as a shift by log_blocks_per_seg (blocks_per_seg is a power of 2).
 */
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec << (sbi)->log_blocks_per_seg)
/* Number of segments in one section (cached from (sbi)->segs_per_sec). */
#define SEGS_PER_SEC(sbi) \
((sbi)->segs_per_sec)
__printf(3, 4) __printf(3, 4)
void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...); void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
...@@ -2511,11 +2519,8 @@ static inline int get_dirty_pages(struct inode *inode) ...@@ -2511,11 +2519,8 @@ static inline int get_dirty_pages(struct inode *inode)
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{ {
unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> BLKS_PER_SEC(sbi));
sbi->log_blocks_per_seg;
return segs / sbi->segs_per_sec;
} }
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
...@@ -2579,7 +2584,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) ...@@ -2579,7 +2584,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
if (sbi->cur_cp_pack == 2) if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg; start_addr += BLKS_PER_SEG(sbi);
return start_addr; return start_addr;
} }
...@@ -2588,7 +2593,7 @@ static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) ...@@ -2588,7 +2593,7 @@ static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
if (sbi->cur_cp_pack == 1) if (sbi->cur_cp_pack == 1)
start_addr += sbi->blocks_per_seg; start_addr += BLKS_PER_SEG(sbi);
return start_addr; return start_addr;
} }
...@@ -3458,7 +3463,7 @@ static inline __le32 *get_dnode_addr(struct inode *inode, ...@@ -3458,7 +3463,7 @@ static inline __le32 *get_dnode_addr(struct inode *inode,
sizeof((f2fs_inode)->field)) \ sizeof((f2fs_inode)->field)) \
<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) #define __is_large_section(sbi) (SEGS_PER_SEC(sbi) > 1)
#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
......
...@@ -2580,7 +2580,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, ...@@ -2580,7 +2580,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
.m_may_create = false }; .m_may_create = false };
struct extent_info ei = {}; struct extent_info ei = {};
pgoff_t pg_start, pg_end, next_pgofs; pgoff_t pg_start, pg_end, next_pgofs;
unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num; unsigned int total = 0, sec_num;
block_t blk_end = 0; block_t blk_end = 0;
bool fragmented = false; bool fragmented = false;
...@@ -2689,7 +2688,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, ...@@ -2689,7 +2688,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
set_inode_flag(inode, FI_SKIP_WRITES); set_inode_flag(inode, FI_SKIP_WRITES);
idx = map.m_lblk; idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { while (idx < map.m_lblk + map.m_len &&
cnt < BLKS_PER_SEG(sbi)) {
struct page *page; struct page *page;
page = f2fs_get_lock_data_page(inode, idx, true); page = f2fs_get_lock_data_page(inode, idx, true);
...@@ -2709,7 +2709,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, ...@@ -2709,7 +2709,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
map.m_lblk = idx; map.m_lblk = idx;
check: check:
if (map.m_lblk < pg_end && cnt < blk_per_seg) if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
goto do_map; goto do_map;
clear_inode_flag(inode, FI_SKIP_WRITES); clear_inode_flag(inode, FI_SKIP_WRITES);
...@@ -2978,8 +2978,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) ...@@ -2978,8 +2978,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
__is_large_section(sbi)) { __is_large_section(sbi)) {
f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
return -EINVAL; return -EINVAL;
} }
...@@ -4081,7 +4081,6 @@ static int f2fs_ioc_decompress_file(struct file *filp) ...@@ -4081,7 +4081,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
pgoff_t page_idx = 0, last_idx; pgoff_t page_idx = 0, last_idx;
unsigned int blk_per_seg = sbi->blocks_per_seg;
int cluster_size = fi->i_cluster_size; int cluster_size = fi->i_cluster_size;
int count, ret; int count, ret;
...@@ -4125,7 +4124,7 @@ static int f2fs_ioc_decompress_file(struct file *filp) ...@@ -4125,7 +4124,7 @@ static int f2fs_ioc_decompress_file(struct file *filp)
if (ret < 0) if (ret < 0)
break; break;
if (get_dirty_pages(inode) >= blk_per_seg) { if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
ret = filemap_fdatawrite(inode->i_mapping); ret = filemap_fdatawrite(inode->i_mapping);
if (ret < 0) if (ret < 0)
break; break;
...@@ -4160,7 +4159,6 @@ static int f2fs_ioc_compress_file(struct file *filp) ...@@ -4160,7 +4159,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t page_idx = 0, last_idx; pgoff_t page_idx = 0, last_idx;
unsigned int blk_per_seg = sbi->blocks_per_seg;
int cluster_size = F2FS_I(inode)->i_cluster_size; int cluster_size = F2FS_I(inode)->i_cluster_size;
int count, ret; int count, ret;
...@@ -4203,7 +4201,7 @@ static int f2fs_ioc_compress_file(struct file *filp) ...@@ -4203,7 +4201,7 @@ static int f2fs_ioc_compress_file(struct file *filp)
if (ret < 0) if (ret < 0)
break; break;
if (get_dirty_pages(inode) >= blk_per_seg) { if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
ret = filemap_fdatawrite(inode->i_mapping); ret = filemap_fdatawrite(inode->i_mapping);
if (ret < 0) if (ret < 0)
break; break;
......
...@@ -259,7 +259,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, ...@@ -259,7 +259,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
p->ofs_unit = 1; p->ofs_unit = 1;
} else { } else {
p->gc_mode = select_gc_type(sbi, gc_type); p->gc_mode = select_gc_type(sbi, gc_type);
p->ofs_unit = sbi->segs_per_sec; p->ofs_unit = SEGS_PER_SEC(sbi);
if (__is_large_section(sbi)) { if (__is_large_section(sbi)) {
p->dirty_bitmap = dirty_i->dirty_secmap; p->dirty_bitmap = dirty_i->dirty_secmap;
p->max_search = count_bits(p->dirty_bitmap, p->max_search = count_bits(p->dirty_bitmap,
...@@ -282,7 +282,8 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, ...@@ -282,7 +282,8 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
/* let's select beginning hot/small space first in no_heap mode*/ /* let's select beginning hot/small space first in no_heap mode*/
if (f2fs_need_rand_seg(sbi)) if (f2fs_need_rand_seg(sbi))
p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec); p->offset = get_random_u32_below(MAIN_SECS(sbi) *
SEGS_PER_SEC(sbi));
else if (test_opt(sbi, NOHEAP) && else if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type))) (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
p->offset = 0; p->offset = 0;
...@@ -295,13 +296,13 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi, ...@@ -295,13 +296,13 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{ {
/* SSR allocates in a segment unit */ /* SSR allocates in a segment unit */
if (p->alloc_mode == SSR) if (p->alloc_mode == SSR)
return sbi->blocks_per_seg; return BLKS_PER_SEG(sbi);
else if (p->alloc_mode == AT_SSR) else if (p->alloc_mode == AT_SSR)
return UINT_MAX; return UINT_MAX;
/* LFS */ /* LFS */
if (p->gc_mode == GC_GREEDY) if (p->gc_mode == GC_GREEDY)
return 2 * sbi->blocks_per_seg * p->ofs_unit; return 2 * BLKS_PER_SEG(sbi) * p->ofs_unit;
else if (p->gc_mode == GC_CB) else if (p->gc_mode == GC_CB)
return UINT_MAX; return UINT_MAX;
else if (p->gc_mode == GC_AT) else if (p->gc_mode == GC_AT)
...@@ -496,9 +497,9 @@ static void add_victim_entry(struct f2fs_sb_info *sbi, ...@@ -496,9 +497,9 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
return; return;
} }
for (i = 0; i < sbi->segs_per_sec; i++) for (i = 0; i < SEGS_PER_SEC(sbi); i++)
mtime += get_seg_entry(sbi, start + i)->mtime; mtime += get_seg_entry(sbi, start + i)->mtime;
mtime = div_u64(mtime, sbi->segs_per_sec); mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
/* Handle if the system time has changed by the user */ /* Handle if the system time has changed by the user */
if (mtime < sit_i->min_mtime) if (mtime < sit_i->min_mtime)
...@@ -599,7 +600,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, ...@@ -599,7 +600,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
unsigned long long age; unsigned long long age;
unsigned long long max_mtime = sit_i->dirty_max_mtime; unsigned long long max_mtime = sit_i->dirty_max_mtime;
unsigned long long min_mtime = sit_i->dirty_min_mtime; unsigned long long min_mtime = sit_i->dirty_min_mtime;
unsigned int seg_blocks = sbi->blocks_per_seg;
unsigned int vblocks; unsigned int vblocks;
unsigned int dirty_threshold = max(am->max_candidate_count, unsigned int dirty_threshold = max(am->max_candidate_count,
am->candidate_ratio * am->candidate_ratio *
...@@ -629,7 +629,7 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, ...@@ -629,7 +629,7 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, !vblocks); f2fs_bug_on(sbi, !vblocks);
/* rare case */ /* rare case */
if (vblocks == seg_blocks) if (vblocks == BLKS_PER_SEG(sbi))
goto skip_node; goto skip_node;
iter++; iter++;
...@@ -755,7 +755,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, ...@@ -755,7 +755,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
int ret = 0; int ret = 0;
mutex_lock(&dirty_i->seglist_lock); mutex_lock(&dirty_i->seglist_lock);
last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
p.alloc_mode = alloc_mode; p.alloc_mode = alloc_mode;
p.age = age; p.age = age;
...@@ -896,7 +896,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, ...@@ -896,7 +896,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
else else
sm->last_victim[p.gc_mode] = segno + p.ofs_unit; sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
sm->last_victim[p.gc_mode] %= sm->last_victim[p.gc_mode] %=
(MAIN_SECS(sbi) * sbi->segs_per_sec); (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
break; break;
} }
} }
...@@ -1670,7 +1670,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -1670,7 +1670,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct f2fs_summary_block *sum; struct f2fs_summary_block *sum;
struct blk_plug plug; struct blk_plug plug;
unsigned int segno = start_segno; unsigned int segno = start_segno;
unsigned int end_segno = start_segno + sbi->segs_per_sec; unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
int seg_freed = 0, migrated = 0; int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE; SUM_TYPE_DATA : SUM_TYPE_NODE;
...@@ -1678,7 +1678,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -1678,7 +1678,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
int submitted = 0; int submitted = 0;
if (__is_large_section(sbi)) if (__is_large_section(sbi))
end_segno = rounddown(end_segno, sbi->segs_per_sec); end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
/* /*
* zone-capacity can be less than zone-size in zoned devices, * zone-capacity can be less than zone-size in zoned devices,
...@@ -1686,7 +1686,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -1686,7 +1686,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
* calculate the end segno in the zone which can be garbage collected * calculate the end segno in the zone which can be garbage collected
*/ */
if (f2fs_sb_has_blkzoned(sbi)) if (f2fs_sb_has_blkzoned(sbi))
end_segno -= sbi->segs_per_sec - end_segno -= SEGS_PER_SEC(sbi) -
f2fs_usable_segs_in_sec(sbi, segno); f2fs_usable_segs_in_sec(sbi, segno);
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
...@@ -1986,7 +1986,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi, ...@@ -1986,7 +1986,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
/* Force block allocation for GC */ /* Force block allocation for GC */
MAIN_SECS(sbi) -= secs; MAIN_SECS(sbi) -= secs;
start = MAIN_SECS(sbi) * sbi->segs_per_sec; start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
end = MAIN_SEGS(sbi) - 1; end = MAIN_SEGS(sbi) - 1;
mutex_lock(&DIRTY_I(sbi)->seglist_lock); mutex_lock(&DIRTY_I(sbi)->seglist_lock);
...@@ -2004,7 +2004,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi, ...@@ -2004,7 +2004,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
f2fs_allocate_segment_for_resize(sbi, type, start, end); f2fs_allocate_segment_for_resize(sbi, type, start, end);
/* do GC to move out valid blocks in the range */ /* do GC to move out valid blocks in the range */
for (segno = start; segno <= end; segno += sbi->segs_per_sec) { for (segno = start; segno <= end; segno += SEGS_PER_SEC(sbi)) {
struct gc_inode_list gc_list = { struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist), .ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
...@@ -2048,7 +2048,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) ...@@ -2048,7 +2048,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
int segment_count; int segment_count;
int segment_count_main; int segment_count_main;
long long block_count; long long block_count;
int segs = secs * sbi->segs_per_sec; int segs = secs * SEGS_PER_SEC(sbi);
f2fs_down_write(&sbi->sb_lock); f2fs_down_write(&sbi->sb_lock);
...@@ -2061,7 +2061,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) ...@@ -2061,7 +2061,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
raw_sb->segment_count = cpu_to_le32(segment_count + segs); raw_sb->segment_count = cpu_to_le32(segment_count + segs);
raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
raw_sb->block_count = cpu_to_le64(block_count + raw_sb->block_count = cpu_to_le64(block_count +
(long long)segs * sbi->blocks_per_seg); (long long)(segs << sbi->log_blocks_per_seg));
if (f2fs_is_multi_device(sbi)) { if (f2fs_is_multi_device(sbi)) {
int last_dev = sbi->s_ndevs - 1; int last_dev = sbi->s_ndevs - 1;
int dev_segs = int dev_segs =
...@@ -2076,8 +2076,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) ...@@ -2076,8 +2076,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{ {
int segs = secs * sbi->segs_per_sec; int segs = secs * SEGS_PER_SEC(sbi);
long long blks = (long long)segs * sbi->blocks_per_seg; long long blks = (long long)segs << sbi->log_blocks_per_seg;
long long user_block_count = long long user_block_count =
le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
...@@ -2119,7 +2119,7 @@ int f2fs_resize_fs(struct file *filp, __u64 block_count) ...@@ -2119,7 +2119,7 @@ int f2fs_resize_fs(struct file *filp, __u64 block_count)
int last_dev = sbi->s_ndevs - 1; int last_dev = sbi->s_ndevs - 1;
__u64 last_segs = FDEV(last_dev).total_segments; __u64 last_segs = FDEV(last_dev).total_segments;
if (block_count + last_segs * sbi->blocks_per_seg <= if (block_count + (last_segs << sbi->log_blocks_per_seg) <=
old_block_count) old_block_count)
return -EINVAL; return -EINVAL;
} }
......
...@@ -2841,7 +2841,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, ...@@ -2841,7 +2841,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
int i, idx, last_offset, nrpages; int i, idx, last_offset, nrpages;
/* scan the node segment */ /* scan the node segment */
last_offset = sbi->blocks_per_seg; last_offset = BLKS_PER_SEG(sbi);
addr = START_BLOCK(sbi, segno); addr = START_BLOCK(sbi, segno);
sum_entry = &sum->entries[0]; sum_entry = &sum->entries[0];
...@@ -3158,7 +3158,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) ...@@ -3158,7 +3158,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
return 0; return 0;
nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
nm_i->nat_bits_blocks; nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) { for (i = 0; i < nm_i->nat_bits_blocks; i++) {
struct page *page; struct page *page;
......
...@@ -208,10 +208,10 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start) ...@@ -208,10 +208,10 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
block_addr = (pgoff_t)(nm_i->nat_blkaddr + block_addr = (pgoff_t)(nm_i->nat_blkaddr +
(block_off << 1) - (block_off << 1) -
(block_off & (sbi->blocks_per_seg - 1))); (block_off & (BLKS_PER_SEG(sbi) - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
block_addr += sbi->blocks_per_seg; block_addr += BLKS_PER_SEG(sbi);
return block_addr; return block_addr;
} }
......
...@@ -354,7 +354,7 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi, ...@@ -354,7 +354,7 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
if (blkaddr + 1 == next_blkaddr) if (blkaddr + 1 == next_blkaddr)
ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS, ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
ra_blocks * 2); ra_blocks * 2);
else if (next_blkaddr % sbi->blocks_per_seg) else if (next_blkaddr % BLKS_PER_SEG(sbi))
ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS, ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
ra_blocks / 2); ra_blocks / 2);
return ra_blocks; return ra_blocks;
......
This diff is collapsed.
...@@ -48,21 +48,21 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, ...@@ -48,21 +48,21 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define IS_CURSEC(sbi, secno) \ #define IS_CURSEC(sbi, secno) \
(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
(sbi)->segs_per_sec) || \ SEGS_PER_SEC(sbi)) || \
((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \ ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
(sbi)->segs_per_sec)) SEGS_PER_SEC(sbi)))
#define MAIN_BLKADDR(sbi) \ #define MAIN_BLKADDR(sbi) \
(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
...@@ -93,24 +93,22 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, ...@@ -93,24 +93,22 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \ #define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg) (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \ #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1)) (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))
#define GET_SEGNO(sbi, blk_addr) \ #define GET_SEGNO(sbi, blk_addr) \
((!__is_valid_data_blkaddr(blk_addr)) ? \ ((!__is_valid_data_blkaddr(blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \ NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr))) GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define CAP_BLKS_PER_SEC(sbi) \ #define CAP_BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \ (SEGS_PER_SEC(sbi) * BLKS_PER_SEG(sbi) - \
(sbi)->unusable_blocks_per_sec) (sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi) \ #define CAP_SEGS_PER_SEC(sbi) \
((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\ (SEGS_PER_SEC(sbi) - ((sbi)->unusable_blocks_per_sec >> \
(sbi)->log_blocks_per_seg)) (sbi)->log_blocks_per_seg))
#define GET_SEC_FROM_SEG(sbi, segno) \ #define GET_SEC_FROM_SEG(sbi, segno) \
(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec) (((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno) \ #define GET_SEG_FROM_SEC(sbi, secno) \
((secno) * (sbi)->segs_per_sec) ((secno) * SEGS_PER_SEC(sbi))
#define GET_ZONE_FROM_SEC(sbi, secno) \ #define GET_ZONE_FROM_SEC(sbi, secno) \
(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone) (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \ #define GET_ZONE_FROM_SEG(sbi, segno) \
...@@ -364,7 +362,7 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, ...@@ -364,7 +362,7 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
unsigned int blocks = 0; unsigned int blocks = 0;
int i; int i;
for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) { for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
struct seg_entry *se = get_seg_entry(sbi, start_segno); struct seg_entry *se = get_seg_entry(sbi, start_segno);
blocks += se->ckpt_valid_blocks; blocks += se->ckpt_valid_blocks;
...@@ -449,7 +447,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) ...@@ -449,7 +447,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
free_i->free_segments++; free_i->free_segments++;
next = find_next_bit(free_i->free_segmap, next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno); start_segno + SEGS_PER_SEC(sbi), start_segno);
if (next >= start_segno + usable_segs) { if (next >= start_segno + usable_segs) {
clear_bit(secno, free_i->free_secmap); clear_bit(secno, free_i->free_secmap);
free_i->free_sections++; free_i->free_sections++;
...@@ -485,7 +483,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, ...@@ -485,7 +483,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
if (!inmem && IS_CURSEC(sbi, secno)) if (!inmem && IS_CURSEC(sbi, secno))
goto skip_free; goto skip_free;
next = find_next_bit(free_i->free_segmap, next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno); start_segno + SEGS_PER_SEC(sbi), start_segno);
if (next >= start_segno + usable_segs) { if (next >= start_segno + usable_segs) {
if (test_and_clear_bit(secno, free_i->free_secmap)) if (test_and_clear_bit(secno, free_i->free_secmap))
free_i->free_sections++; free_i->free_sections++;
...@@ -793,10 +791,10 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, ...@@ -793,10 +791,10 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED; return -EFSCORRUPTED;
} }
if (usable_blks_per_seg < sbi->blocks_per_seg) if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map, f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
sbi->blocks_per_seg, BLKS_PER_SEG(sbi),
usable_blks_per_seg) != sbi->blocks_per_seg); usable_blks_per_seg) != BLKS_PER_SEG(sbi));
/* check segment usage, and check boundary of a given segment number */ /* check segment usage, and check boundary of a given segment number */
if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
...@@ -915,9 +913,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type) ...@@ -915,9 +913,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
return 0; return 0;
if (type == DATA) if (type == DATA)
return sbi->blocks_per_seg; return BLKS_PER_SEG(sbi);
else if (type == NODE) else if (type == NODE)
return 8 * sbi->blocks_per_seg; return 8 * BLKS_PER_SEG(sbi);
else if (type == META) else if (type == META)
return 8 * BIO_MAX_VECS; return 8 * BIO_MAX_VECS;
else else
......
...@@ -3643,7 +3643,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) ...@@ -3643,7 +3643,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
} }
main_segs = le32_to_cpu(raw_super->segment_count_main); main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg; blocks_per_seg = BLKS_PER_SEG(sbi);
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
...@@ -3756,8 +3756,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -3756,8 +3756,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
sbi->total_sections = le32_to_cpu(raw_super->section_count); sbi->total_sections = le32_to_cpu(raw_super->section_count);
sbi->total_node_count = sbi->total_node_count =
(le32_to_cpu(raw_super->segment_count_nat) / 2) ((le32_to_cpu(raw_super->segment_count_nat) / 2) *
* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; NAT_ENTRY_PER_BLOCK) << sbi->log_blocks_per_seg;
F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino); F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino); F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino); F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
...@@ -3766,7 +3766,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -3766,7 +3766,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->next_victim_seg[BG_GC] = NULL_SEGNO; sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
sbi->next_victim_seg[FG_GC] = NULL_SEGNO; sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = sbi->segs_per_sec; sbi->migration_granularity = SEGS_PER_SEC(sbi);
sbi->seq_file_ra_mul = MIN_RA_MUL; sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE; sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE; sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
......
...@@ -493,8 +493,8 @@ static ssize_t __sbi_store(struct f2fs_attr *a, ...@@ -493,8 +493,8 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
spin_lock(&sbi->stat_lock); spin_lock(&sbi->stat_lock);
if (t > (unsigned long)(sbi->user_block_count - if (t > (unsigned long)(sbi->user_block_count -
F2FS_OPTION(sbi).root_reserved_blocks - F2FS_OPTION(sbi).root_reserved_blocks -
sbi->blocks_per_seg * (SM_I(sbi)->additional_reserved_segments <<
SM_I(sbi)->additional_reserved_segments)) { sbi->log_blocks_per_seg))) {
spin_unlock(&sbi->stat_lock); spin_unlock(&sbi->stat_lock);
return -EINVAL; return -EINVAL;
} }
...@@ -551,7 +551,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a, ...@@ -551,7 +551,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
} }
if (!strcmp(a->attr.name, "migration_granularity")) { if (!strcmp(a->attr.name, "migration_granularity")) {
if (t == 0 || t > sbi->segs_per_sec) if (t == 0 || t > SEGS_PER_SEC(sbi))
return -EINVAL; return -EINVAL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment