Commit 377224c4 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: don't split checkpoint in fstrim

Now that discards are issued asynchronously from a separate thread instead of
inside checkpoint, checkpoint no longer suffers long latency from handling a
huge number of synchronous discard commands. So we no longer need to split the
checkpoint to trim in batches; merge the batched trims into a single checkpoint
and obsolete the related sysfs entry.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 8bb4f253
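
For orientation, the f2fs_trim_fs() path changed below is driven from userspace
through the FITRIM ioctl; the start/len/minlen fields of struct fstrim_range are
the range->start/len/minlen values used in the hunks, and range->len is written
back with the number of bytes trimmed. A minimal sketch of such a caller follows
(the mount point "/mnt/f2fs" is only a placeholder):

/*
 * Minimal FITRIM caller (sketch): "/mnt/f2fs" is only a placeholder;
 * FITRIM and struct fstrim_range come from <linux/fs.h>.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct fstrim_range range = {
                .start  = 0,
                .len    = UINT64_MAX,   /* trim the whole filesystem */
                .minlen = 0,            /* f2fs clamps this to at least one block */
        };
        int fd = open("/mnt/f2fs", O_RDONLY | O_DIRECTORY);

        if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                return 1;
        }
        /* On return, range.len holds the number of bytes trimmed, i.e. the
         * F2FS_BLK_TO_BYTES(trimmed) value set by f2fs_trim_fs(). */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}
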
Documentation/ABI/testing/sysfs-fs-f2fs
@@ -101,6 +101,7 @@ Date:          February 2015
 Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
 Description:
                 Controls the trimming rate in batch mode.
+                <deprecated>
 
 What:          /sys/fs/f2fs/<disk>/cp_interval
 Date:          October 2015
fs/f2fs/segment.h
@@ -176,11 +176,6 @@ enum {
 #define CP_DISCARD      0x00000010
 #define CP_TRIMMED      0x00000020
 
-#define DEF_BATCHED_TRIM_SECTIONS       2048
-#define BATCHED_TRIM_SEGMENTS(sbi)      \
-                (GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
-#define BATCHED_TRIM_BLOCKS(sbi)        \
-                (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
 #define MAX_DISCARD_BLOCKS(sbi)         BLKS_PER_SEC(sbi)
 #define DEF_MAX_DISCARD_REQUEST         8       /* issue 8 discards per round */
 #define DEF_MAX_DISCARD_LEN             512     /* Max. 2MB per discard */
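
For scale, the removed batching above limited how much a single checkpoint round
could trim. Assuming the common default geometry of 4KB blocks, 512 blocks per
segment (log_blocks_per_seg = 9) and segs_per_sec = 1 (these defaults are an
assumption here, not part of the patch), one batch of
DEF_BATCHED_TRIM_SECTIONS = 2048 covered about 4GB, so a large fstrim had to go
through many checkpoint iterations. A back-of-the-envelope sketch:

/* Sketch of the removed batch size; the geometry values below are
 * assumed defaults, not taken from the patch itself. */
#include <stdio.h>

int main(void)
{
        unsigned long long trim_sections      = 2048;   /* DEF_BATCHED_TRIM_SECTIONS */
        unsigned long long segs_per_sec       = 1;      /* assumed default */
        unsigned long long log_blocks_per_seg = 9;      /* 512 blocks x 4KB = 2MB segments */
        unsigned long long block_size         = 4096;

        unsigned long long batch_segments = trim_sections * segs_per_sec;
        unsigned long long batch_blocks   = batch_segments << log_blocks_per_seg;

        /* 2048 segments * 512 blocks * 4KB = 4GB per checkpoint round */
        printf("blocks per batch: %llu (~%llu MiB)\n",
               batch_blocks, batch_blocks * block_size >> 20);
        return 0;
}
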
fs/f2fs/segment.c
@@ -2395,7 +2395,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
        __u64 start = F2FS_BYTES_TO_BLK(range->start);
        __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
-       unsigned int start_segno, end_segno, cur_segno;
+       unsigned int start_segno, end_segno;
        block_t start_block, end_block;
        struct cp_control cpc;
        struct discard_policy dpolicy;
@@ -2421,40 +2421,27 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        cpc.reason = CP_DISCARD;
        cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
+       cpc.trim_start = start_segno;
+       cpc.trim_end = end_segno;
 
-       /* do checkpoint to issue discard commands safely */
-       for (cur_segno = start_segno; cur_segno <= end_segno;
-                                       cur_segno = cpc.trim_end + 1) {
-               cpc.trim_start = cur_segno;
-
-               if (sbi->discard_blks == 0)
-                       break;
-               else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
-                       cpc.trim_end = end_segno;
-               else
-                       cpc.trim_end = min_t(unsigned int,
-                               rounddown(cur_segno +
-                               BATCHED_TRIM_SEGMENTS(sbi),
-                               sbi->segs_per_sec) - 1, end_segno);
-
-               mutex_lock(&sbi->gc_mutex);
-               err = write_checkpoint(sbi, &cpc);
-               mutex_unlock(&sbi->gc_mutex);
-               if (err)
-                       break;
-
-               schedule();
-       }
+       if (sbi->discard_blks == 0)
+               goto out;
+
+       mutex_lock(&sbi->gc_mutex);
+       err = write_checkpoint(sbi, &cpc);
+       mutex_unlock(&sbi->gc_mutex);
+       if (err)
+               goto out;
 
        start_block = START_BLOCK(sbi, start_segno);
-       end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+       end_block = START_BLOCK(sbi, end_segno + 1);
 
        __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
        __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
        trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
                                        start_block, end_block);
+out:
        range->len = F2FS_BLK_TO_BYTES(trimmed);
-out:
        return err;
 }
@@ -3841,8 +3828,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
        sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
        sm_info->min_ssr_sections = reserved_sections(sbi);
 
-       sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
-
        INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
        init_rwsem(&sm_info->curseg_lock);
fs/f2fs/sysfs.c
@@ -245,6 +245,9 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
                return count;
        }
 
+       if (!strcmp(a->attr.name, "trim_sections"))
+               return -EINVAL;
+
        *ui = t;
 
        if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
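
With the sysfs.c hunk above, a write to the now-deprecated trim_sections node is
rejected with -EINVAL while the file itself remains present and readable. A small
sketch of a userspace probe; the device name "sda1" in the path is only a
placeholder:

/* Probe the deprecated node; "sda1" is a placeholder for the real f2fs device. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/fs/f2fs/sda1/trim_sections";  /* placeholder */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* After this patch the store handler returns -EINVAL for trim_sections,
         * so this write is expected to fail with EINVAL. */
        if (write(fd, "2048", 4) < 0)
                printf("write rejected: %s\n", strerror(errno));
        else
                printf("write accepted (kernel without this patch?)\n");
        close(fd);
        return 0;
}
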