Commit 4c521f49 authored by Jaegeuk Kim

f2fs: use meta_inode cache to improve roll-forward speed

Previously, all the dnode pages had to be read synchronously during roll-forward recovery, and even worse, the whole chain was traversed twice.
This patch removes those redundant and costly read operations by using the page cache of meta_inode together with its readahead function.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 60979115
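
The sketch below is a small userspace model of the idea behind this patch, not f2fs code; every identifier in it (RA_WINDOW, NR_BLOCKS, meta_cache, ra_meta_blocks, get_meta_block_ra) is invented for illustration. It only shows why routing the chain walk through a cache, with a multi-block readahead on a miss, makes the second traversal of the fsync-dnode chain essentially free, whereas the old path issued one synchronous READ_SYNC bio per dnode on every pass.

/*
 * Hedged sketch, not kernel code: a toy model of miss-triggered readahead
 * over a meta "page cache".  All identifiers here are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS 64        /* pretend size of the meta area                */
#define RA_WINDOW 8         /* stand-in for MAX_BIO_BLOCKS(max_hw_blocks()) */

static bool meta_cache[NR_BLOCKS];  /* "page is cached and up to date" */
static int bios_issued;             /* multi-block bios sent to the device */

/* Rough analogue of ra_meta_pages(): one bio fills RA_WINDOW cache entries. */
static void ra_meta_blocks(int start)
{
        bios_issued++;
        for (int i = start; i < start + RA_WINDOW && i < NR_BLOCKS; i++)
                meta_cache[i] = true;
}

/* Rough analogue of get_meta_page_ra(): read ahead only on a cache miss. */
static void get_meta_block_ra(int blkno)
{
        if (!meta_cache[blkno])
                ra_meta_blocks(blkno);
        /* the caller now works on the cached block */
}

int main(void)
{
        /* a fake fsync-dnode chain, i.e. the next_blkaddr_of_node() hops */
        int chain[] = { 10, 12, 14, 16, 18, 20 };
        int n = sizeof(chain) / sizeof(chain[0]);

        /* pass 1: the find_fsync_dnodes()-style walk */
        for (int i = 0; i < n; i++)
                get_meta_block_ra(chain[i]);
        printf("bios after pass 1: %d (old scheme: %d READ_SYNC bios)\n",
               bios_issued, n);

        /* pass 2: the recover_data()-style walk is served from the cache */
        for (int i = 0; i < n; i++)
                get_meta_block_ra(chain[i]);
        printf("bios after pass 2: %d (old scheme: %d READ_SYNC bios)\n",
               bios_issued, 2 * n);
        return 0;
}

With these toy numbers it prints 2 bios after both passes, against 6 and 12 synchronous reads for the pre-patch scheme; in the real code the window size comes from MAX_BIO_BLOCKS(max_hw_blocks(sbi)), as used by get_meta_page_ra() in the diff below.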
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -72,7 +72,23 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	return page;
 }
 
-static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+struct page *get_meta_page_ra(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+	bool readahead = false;
+	struct page *page;
+
+	page = find_get_page(META_MAPPING(sbi), index);
+	if (!page || (page && !PageUptodate(page)))
+		readahead = true;
+	f2fs_put_page(page, 0);
+
+	if (readahead)
+		ra_meta_pages(sbi, index,
+				MAX_BIO_BLOCKS(max_hw_blocks(sbi)), META_POR);
+	return get_meta_page(sbi, index);
+}
+
+static inline block_t get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 {
 	switch (type) {
 	case META_NAT:
@@ -82,6 +98,8 @@ static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 	case META_SSA:
 	case META_CP:
 		return 0;
+	case META_POR:
+		return SM_I(sbi)->seg0_blkaddr + TOTAL_BLKS(sbi);
 	default:
 		BUG();
 	}
@@ -90,12 +108,13 @@ static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
 /*
  * Readahead CP/NAT/SIT/SSA pages
  */
-int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
 {
 	block_t prev_blk_addr = 0;
 	struct page *page;
-	int blkno = start;
-	int max_blks = get_max_meta_blks(sbi, type);
+	block_t blkno = start;
+	block_t max_blks = get_max_meta_blks(sbi, type);
+	block_t min_blks = SM_I(sbi)->seg0_blkaddr;
 
 	struct f2fs_io_info fio = {
 		.type = META,
@@ -125,7 +144,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
 			break;
 		case META_SSA:
 		case META_CP:
-			/* get ssa/cp block addr */
+		case META_POR:
+			if (unlikely(blkno >= max_blks))
+				goto out;
+			if (unlikely(blkno < min_blks))
+				goto out;
 			blk_addr = blkno;
 			break;
 		default:
...
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -103,7 +103,8 @@ enum {
 	META_CP,
 	META_NAT,
 	META_SIT,
-	META_SSA
+	META_SSA,
+	META_POR,
 };
 
 /* for the list of ino */
@@ -1294,7 +1295,8 @@ void destroy_segment_manager_caches(void);
  */
 struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
-int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
+struct page *get_meta_page_ra(struct f2fs_sb_info *, pgoff_t);
+int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
 void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
 void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
...
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -138,7 +138,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 {
 	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
 	struct curseg_info *curseg;
-	struct page *page;
+	struct page *page = NULL;
 	block_t blkaddr;
 	int err = 0;
 
@@ -146,20 +146,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
-	/* read node page */
-	page = alloc_page(GFP_F2FS_ZERO);
-	if (!page)
-		return -ENOMEM;
-	lock_page(page);
-
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
-		if (err)
-			return err;
+		if (blkaddr < SM_I(sbi)->main_blkaddr ||
+			blkaddr >= (SM_I(sbi)->seg0_blkaddr + TOTAL_BLKS(sbi)))
+			return 0;
 
-		lock_page(page);
+		page = get_meta_page_ra(sbi, blkaddr);
 
 		if (cp_ver != cpver_of_node(page))
 			break;
@@ -202,11 +196,9 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
+		f2fs_put_page(page, 1);
 	}
-
-	unlock_page(page);
-	__free_pages(page, 0);
-
+	f2fs_put_page(page, 1);
 	return err;
 }
 
@@ -400,7 +392,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
 {
 	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
 	struct curseg_info *curseg;
-	struct page *page;
+	struct page *page = NULL;
 	int err = 0;
 	block_t blkaddr;
 
@@ -408,32 +400,29 @@ static int recover_data(struct f2fs_sb_info *sbi,
 	curseg = CURSEG_I(sbi, type);
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
-	/* read node page */
-	page = alloc_page(GFP_F2FS_ZERO);
-	if (!page)
-		return -ENOMEM;
-
-	lock_page(page);
-
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
-		if (err)
-			return err;
+		if (blkaddr < SM_I(sbi)->main_blkaddr ||
+			blkaddr >= (SM_I(sbi)->seg0_blkaddr + TOTAL_BLKS(sbi)))
+			break;
 
-		lock_page(page);
+		page = get_meta_page_ra(sbi, blkaddr);
 
-		if (cp_ver != cpver_of_node(page))
+		if (cp_ver != cpver_of_node(page)) {
+			f2fs_put_page(page, 1);
 			break;
+		}
 
 		entry = get_fsync_inode(head, ino_of_node(page));
 		if (!entry)
 			goto next;
 
 		err = do_recover_data(sbi, entry->inode, page, blkaddr);
-		if (err)
+		if (err) {
+			f2fs_put_page(page, 1);
 			break;
+		}
 
 		if (entry->blkaddr == blkaddr) {
 			iput(entry->inode);
@@ -443,11 +432,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
+		f2fs_put_page(page, 1);
 	}
-
-	unlock_page(page);
-	__free_pages(page, 0);
-
 	if (!err)
 		allocate_new_segments(sbi);
 	return err;
@@ -493,6 +479,10 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 	destroy_fsync_dnodes(&inode_list);
 	kmem_cache_destroy(fsync_entry_slab);
 
+	/* truncate meta pages to be used by the recovery */
+	truncate_inode_pages_range(META_MAPPING(sbi),
+		SM_I(sbi)->main_blkaddr << PAGE_CACHE_SHIFT, -1);
+
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
 		truncate_inode_pages_final(META_MAPPING(sbi));
...
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -87,6 +87,7 @@
 	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
 #define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)
 #define TOTAL_SECS(sbi)	(sbi->total_sections)
+#define TOTAL_BLKS(sbi)	(SM_I(sbi)->segment_count << sbi->log_blocks_per_seg)
 
 #define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
 	(((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
@@ -553,7 +554,7 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
-	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
+	block_t total_blks = TOTAL_BLKS(sbi);
 	block_t start_addr = sm_info->seg0_blkaddr;
 	block_t end_addr = start_addr + total_blks - 1;
 	BUG_ON(blk_addr < start_addr);
@@ -606,7 +607,7 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	struct f2fs_sm_info *sm_info = SM_I(sbi);
-	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
+	block_t total_blks = TOTAL_BLKS(sbi);
 	block_t start_addr = sm_info->seg0_blkaddr;
 	block_t end_addr = start_addr + total_blks - 1;
...