Commit 9ecf4b80 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: use ra_meta_pages to simplify readahead code in restore_node_summary

Use the more common function ra_meta_pages() with META_POR to readahead node blocks
in restore_node_summary() instead of ra_sum_pages(). This simplifies the readahead
code there and lets us remove the now-unused ra_sum_pages().
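The heart of the change is replacing the hand-rolled bio submission with the
generic metadata readahead path: issue one ra_meta_pages() call for the whole
block range, then pull each block back with get_meta_page(). A minimal sketch
of that pattern follows (the helper name read_node_sums is made up for
illustration; ra_meta_pages, get_meta_page, f2fs_put_page, F2FS_NODE and
META_MAPPING are the f2fs internals used in the diff below):

	/* Sketch only: the readahead-then-read pattern adopted below. */
	static void read_node_sums(struct f2fs_sb_info *sbi, block_t addr,
				int nrpages, struct f2fs_summary *sum_entry)
	{
		block_t idx;

		/* queue readahead for the whole contiguous block range */
		ra_meta_pages(sbi, addr, nrpages, META_POR);

		for (idx = addr; idx < addr + nrpages; idx++) {
			/* each block comes back via the meta mapping, locked */
			struct page *page = get_meta_page(sbi, idx);
			struct f2fs_node *rn = F2FS_NODE(page);

			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		/* the blocks are needed only once, so drop them again */
		invalidate_mapping_pages(META_MAPPING(sbi), addr, addr + nrpages);
	}
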

changes from v2:
 o use invalidate_mapping_pages as before, as suggested by Changman Lee.
changes from v1:
 o fix a bug when using truncate_inode_pages_range, pointed out by
   Jaegeuk Kim.
Reviewed-by: Changman Lee <cm224.lee@samsung.com>
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 5c27f4ee
...
@@ -1727,80 +1727,41 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	return 0;
 }
 
-/*
- * ra_sum_pages() merge contiguous pages into one bio and submit.
- * these pre-read pages are allocated in bd_inode's mapping tree.
- */
-static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
-				int start, int nrpages)
-{
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
-	struct address_space *mapping = inode->i_mapping;
-	int i, page_idx = start;
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
-		/* alloc page in bd_inode for reading node summary info */
-		pages[i] = grab_cache_page(mapping, page_idx);
-		if (!pages[i])
-			break;
-		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
-	}
-
-	f2fs_submit_merged_bio(sbi, META, READ);
-	return i;
-}
-
 int restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
 	struct f2fs_summary *sum_entry;
-	struct inode *inode = sbi->sb->s_bdev->bd_inode;
 	block_t addr;
 	int bio_blocks = MAX_BIO_BLOCKS(sbi);
-	struct page *pages[bio_blocks];
-	int i, idx, last_offset, nrpages, err = 0;
+	int i, idx, last_offset, nrpages;
 
 	/* scan the node segment */
 	last_offset = sbi->blocks_per_seg;
 	addr = START_BLOCK(sbi, segno);
 	sum_entry = &sum->entries[0];
 
-	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
+	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
 		nrpages = min(last_offset - i, bio_blocks);
 
 		/* readahead node pages */
-		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
-		if (!nrpages)
-			return -ENOMEM;
+		ra_meta_pages(sbi, addr, nrpages, META_POR);
 
-		for (idx = 0; idx < nrpages; idx++) {
-			if (err)
-				goto skip;
+		for (idx = addr; idx < addr + nrpages; idx++) {
+			struct page *page = get_meta_page(sbi, idx);
 
-			lock_page(pages[idx]);
-			if (unlikely(!PageUptodate(pages[idx]))) {
-				err = -EIO;
-			} else {
-				rn = F2FS_NODE(pages[idx]);
-				sum_entry->nid = rn->footer.nid;
-				sum_entry->version = 0;
-				sum_entry->ofs_in_node = 0;
-				sum_entry++;
-			}
-			unlock_page(pages[idx]);
-skip:
-			page_cache_release(pages[idx]);
+			rn = F2FS_NODE(page);
+			sum_entry->nid = rn->footer.nid;
+			sum_entry->version = 0;
+			sum_entry->ofs_in_node = 0;
+			sum_entry++;
+			f2fs_put_page(page, 1);
 		}
 
-		invalidate_mapping_pages(inode->i_mapping, addr,
+		invalidate_mapping_pages(META_MAPPING(sbi), addr,
 							addr + nrpages);
 	}
-	return err;
+	return 0;
 }
 
 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
...