Commit a66cdd98 authored by Jaegeuk Kim

f2fs: introduce discard_map for f2fs_trim_fs

This patch adds a bitmap to track discards issued by f2fs_trim_fs.
The rule is to issue discard commands only for blocks invalidated after mount.
Once mount is done, f2fs_trim_fs trims out the whole invalid area.
After that, it will not issue discards redundantly.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent d6c67a4f
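What the diff below implements, in short: every segment gains a discard_map bitmap whose set bits mean "valid or already discarded, no trim needed", and the superblock gains a discard_blks counter of remaining candidates. The following standalone C sketch models only that bookkeeping; the geometry (512 blocks per segment), the toy_seg type, and the helper names are illustrative assumptions, not f2fs code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Minimal userspace model of the bookkeeping this patch adds: one
 * discard_map per segment plus a global discard_blks counter.
 * Geometry and names are illustrative assumptions, not f2fs code.
 */
#define BLOCKS_PER_SEG	512
#define MAP_SIZE	(BLOCKS_PER_SEG / 8)

struct toy_seg {
	unsigned int valid_blocks;
	unsigned char cur_valid_map[MAP_SIZE];
	unsigned char discard_map[MAP_SIZE];	/* set bit: no discard needed */
};

static long discard_blks;			/* remaining trim candidates */

/* MSB-first test-and-set, same addressing as f2fs_test_and_set_bit() */
static bool test_and_set(unsigned char *map, unsigned int nr)
{
	unsigned char mask = 1 << (7 - (nr & 0x07));
	bool old = map[nr >> 3] & mask;

	map[nr >> 3] |= mask;
	return old;
}

/*
 * Mount time (build_sit_entries): valid blocks never need a discard, so
 * the map starts as a copy of the valid map and every invalid block is
 * counted as a candidate.
 */
static void seed_discard_map(struct toy_seg *se)
{
	memcpy(se->discard_map, se->cur_valid_map, MAP_SIZE);
	discard_blks += BLOCKS_PER_SEG - se->valid_blocks;
}

/*
 * Issuing a discard (f2fs_issue_discard): mark the block and drop the
 * counter, so a later f2fs_trim_fs pass skips it instead of trimming it
 * again.
 */
static void issue_discard(struct toy_seg *se, unsigned int off)
{
	if (!test_and_set(se->discard_map, off))
		discard_blks--;
}

int main(void)
{
	struct toy_seg se = { 0 };

	seed_discard_map(&se);		/* empty segment: 512 candidates */
	issue_discard(&se, 0);
	issue_discard(&se, 0);		/* second call is a no-op */
	printf("remaining candidates: %ld\n", discard_blks);	/* 511 */
	return 0;
}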
@@ -1058,7 +1058,8 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	mutex_lock(&sbi->cp_mutex);
 
 	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
-		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC))
+		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
+		(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
 		goto out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto out;
@@ -143,7 +143,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 	si->base_mem += sizeof(struct sit_info);
 	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
 	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
-	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+	si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE;
 	if (sbi->segs_per_sec > 1)
 		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -117,6 +117,8 @@ enum {
 #define DEF_BATCHED_TRIM_SECTIONS	32
 #define BATCHED_TRIM_SEGMENTS(sbi)	\
 		(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
+#define BATCHED_TRIM_BLOCKS(sbi)	\
+		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
 
 struct cp_control {
 	int reason;
@@ -698,6 +700,7 @@ struct f2fs_sb_info {
 	block_t user_block_count;		/* # of user blocks */
 	block_t total_valid_block_count;	/* # of valid blocks */
 	block_t alloc_valid_block_count;	/* # of allocated blocks */
+	block_t discard_blks;			/* discard command candidates */
 	block_t last_valid_block_count;		/* for recovery */
 	u32 s_next_generation;			/* for NFS support */
 	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */
@@ -1225,6 +1228,24 @@ static inline int f2fs_test_bit(unsigned int nr, char *addr)
 	return mask & *addr;
 }
 
+static inline void f2fs_set_bit(unsigned int nr, char *addr)
+{
+	int mask;
+
+	addr += (nr >> 3);
+	mask = 1 << (7 - (nr & 0x07));
+	*addr |= mask;
+}
+
+static inline void f2fs_clear_bit(unsigned int nr, char *addr)
+{
+	int mask;
+
+	addr += (nr >> 3);
+	mask = 1 << (7 - (nr & 0x07));
+	*addr &= ~mask;
+}
+
 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
 {
 	int mask;
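The two new helpers use the same MSB-first addressing as the existing f2fs bit helpers: bit nr lives in byte nr >> 3 under mask 1 << (7 - (nr & 0x07)). A standalone check of that mapping (userspace sketch, not kernel code):

#include <assert.h>
#include <string.h>

/* Same byte/mask computation as the helpers above, copied into userspace. */
static void set_bit_msb(unsigned int nr, char *addr)
{
	addr += nr >> 3;
	*addr |= 1 << (7 - (nr & 0x07));
}

static void clear_bit_msb(unsigned int nr, char *addr)
{
	addr += nr >> 3;
	*addr &= ~(1 << (7 - (nr & 0x07)));
}

int main(void)
{
	char map[2];

	memset(map, 0, sizeof(map));
	set_bit_msb(0, map);		/* highest bit of byte 0 */
	set_bit_msb(10, map);		/* byte 1, mask 1 << 5   */
	assert((unsigned char)map[0] == 0x80);
	assert((unsigned char)map[1] == 0x20);
	clear_bit_msb(10, map);
	assert(map[1] == 0);
	return 0;
}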
@@ -468,6 +468,17 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 {
 	sector_t start = SECTOR_FROM_BLOCK(blkstart);
 	sector_t len = SECTOR_FROM_BLOCK(blklen);
+	struct seg_entry *se;
+	unsigned int offset;
+	block_t i;
+
+	for (i = blkstart; i < blkstart + blklen; i++) {
+		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
+		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
+
+		if (!f2fs_test_and_set_bit(offset, se->discard_map))
+			sbi->discard_blks--;
+	}
 	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }
@@ -483,7 +494,8 @@ void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
-		struct cp_control *cpc, unsigned int start, unsigned int end)
+		struct cp_control *cpc, struct seg_entry *se,
+		unsigned int start, unsigned int end)
 {
 	struct list_head *head = &SM_I(sbi)->discard_list;
 	struct discard_entry *new, *last;
@@ -514,41 +526,24 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
+	unsigned long *discard_map = (unsigned long *)se->discard_map;
 	unsigned long *dmap = SIT_I(sbi)->tmp_map;
 	unsigned int start = 0, end = -1;
 	bool force = (cpc->reason == CP_DISCARD);
 	int i;
 
-	if (!force && (!test_opt(sbi, DISCARD) ||
-			SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards))
+	if (se->valid_blocks == max_blocks)
 		return;
 
-	if (force && !se->valid_blocks) {
-		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-		/*
-		 * if this segment is registered in the prefree list, then
-		 * we should skip adding a discard candidate, and let the
-		 * checkpoint do that later.
-		 */
-		mutex_lock(&dirty_i->seglist_lock);
-		if (test_bit(cpc->trim_start, dirty_i->dirty_segmap[PRE])) {
-			mutex_unlock(&dirty_i->seglist_lock);
-			cpc->trimmed += sbi->blocks_per_seg;
-			return;
-		}
-		mutex_unlock(&dirty_i->seglist_lock);
-
-		__add_discard_entry(sbi, cpc, 0, sbi->blocks_per_seg);
-		return;
+	if (!force) {
+		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
+			SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
+			return;
 	}
 
-	/* zero block will be discarded through the prefree list */
-	if (!se->valid_blocks || se->valid_blocks == max_blocks)
-		return;
-
 	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
 	for (i = 0; i < entries; i++)
-		dmap[i] = force ? ~ckpt_map[i] :
+		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
 				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
 
 	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
@@ -561,7 +556,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		if (force && end - start < cpc->trim_minlen)
 			continue;
 
-		__add_discard_entry(sbi, cpc, start, end);
+		__add_discard_entry(sbi, cpc, se, start, end);
 	}
 }
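The rewritten dmap computation is the heart of the change: under CP_DISCARD (force), a block becomes a candidate only if it is invalid in the checkpointed map and not already covered by discard_map, while the normal checkpoint path keeps the old expression. A tiny check of the force case, using one byte per map instead of unsigned long words (userspace sketch with made-up values):

#include <assert.h>

int main(void)
{
	/* One byte of each bitmap, MSB-first: bit 0 is 0x80. */
	unsigned char ckpt_map    = 0xF0;	/* blocks 0-3 valid at last checkpoint  */
	unsigned char discard_map = 0x0C;	/* blocks 4-5 valid or already discarded */

	/* force path: candidates are invalid-at-checkpoint AND not yet covered */
	unsigned char dmap = ~ckpt_map & ~discard_map;

	assert(dmap == 0x03);			/* only blocks 6 and 7 get queued */
	return 0;
}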
@@ -675,9 +670,13 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 	if (del > 0) {
 		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
 			f2fs_bug_on(sbi, 1);
+		if (!f2fs_test_and_set_bit(offset, se->discard_map))
+			sbi->discard_blks--;
 	} else {
 		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
 			f2fs_bug_on(sbi, 1);
+		if (f2fs_test_and_clear_bit(offset, se->discard_map))
+			sbi->discard_blks++;
 	}
 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
 		se->ckpt_valid_blocks += del;
@@ -1080,7 +1079,14 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	/* do checkpoint to issue discard commands safely */
 	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
 		cpc.trim_start = start_segno;
-		cpc.trim_end = min_t(unsigned int, rounddown(start_segno +
+
+		if (sbi->discard_blks == 0)
+			break;
+		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
+			cpc.trim_end = end_segno;
+		else
+			cpc.trim_end = min_t(unsigned int,
+				rounddown(start_segno +
 				BATCHED_TRIM_SEGMENTS(sbi),
 				sbi->segs_per_sec) - 1, end_segno);
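With the counter in place, f2fs_trim_fs can stop as soon as nothing is left to discard, and can widen a batch to the whole requested range when the remaining candidates already fit in one BATCHED_TRIM_BLOCKS window; only otherwise does it keep the section-aligned batching. A plain-integer sketch of that decision, using made-up geometry values (not the real f2fs layout):

#include <stdio.h>

/* Toy geometry: values chosen for illustration only. */
#define SEGS_PER_SEC		4
#define LOG_BLOCKS_PER_SEG	9	/* 512 blocks per segment */
#define TRIM_SECTIONS		32	/* mirrors DEF_BATCHED_TRIM_SECTIONS */

#define BATCH_SEGS	(TRIM_SECTIONS * SEGS_PER_SEC)
#define BATCH_BLOCKS	(BATCH_SEGS << LOG_BLOCKS_PER_SEG)

static unsigned int rounddown_u(unsigned int x, unsigned int y)
{
	return x - (x % y);
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirror of the new trim_end selection; returns end_segno + 1 for "stop". */
static unsigned int pick_trim_end(unsigned long discard_blks,
				  unsigned int start_segno,
				  unsigned int end_segno)
{
	if (discard_blks == 0)
		return end_segno + 1;		/* nothing left: break out */
	if (discard_blks < BATCH_BLOCKS)
		return end_segno;		/* finish in a single batch */
	return min_u(rounddown_u(start_segno + BATCH_SEGS,
				 SEGS_PER_SEC) - 1, end_segno);
}

int main(void)
{
	/* Plenty of candidates: the batch is capped at a section boundary. */
	printf("%u\n", pick_trim_end(1 << 20, 0, 100000));	/* 127 */
	/* Few candidates: take the whole requested range at once. */
	printf("%u\n", pick_trim_end(100, 0, 100000));		/* 100000 */
	return 0;
}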
@@ -1859,8 +1865,11 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
 		sit_i->sentries[start].ckpt_valid_map
 			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
-		if (!sit_i->sentries[start].cur_valid_map
-				|| !sit_i->sentries[start].ckpt_valid_map)
+		sit_i->sentries[start].discard_map
+			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+		if (!sit_i->sentries[start].cur_valid_map ||
+				!sit_i->sentries[start].ckpt_valid_map ||
+				!sit_i->sentries[start].discard_map)
 			return -ENOMEM;
 	}
@@ -1998,6 +2007,11 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 got_it:
 		check_block_count(sbi, start, &sit);
 		seg_info_from_raw_sit(se, &sit);
+
+		/* build discard map only one time */
+		memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+		sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
+
 		if (sbi->segs_per_sec > 1) {
 			struct sec_entry *e = get_sec_entry(sbi, start);
 			e->valid_blocks += se->valid_blocks;
@@ -2247,6 +2261,7 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
 			kfree(sit_i->sentries[start].cur_valid_map);
 			kfree(sit_i->sentries[start].ckpt_valid_map);
+			kfree(sit_i->sentries[start].discard_map);
 		}
 	}
 	kfree(sit_i->tmp_map);
@@ -163,6 +163,7 @@ struct seg_entry {
 	 */
 	unsigned short ckpt_valid_blocks;
 	unsigned char *ckpt_valid_map;
+	unsigned char *discard_map;
 	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
 	unsigned long long mtime;	/* modification time of the segment */
 };