Commit cfb271d4 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: add unlikely() macro for compiler optimization

As we know, some of our branch conditions will rarely be true, so we can
add 'unlikely' to let the compiler optimize this code; this way we can drop
unneeded 'jump' assembly code and improve performance.

change log:
 o add *unlikely* in as many places as possible across the whole set of
   source files at once, as suggested by Jaegeuk Kim.
Suggested-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
parent b9987a27
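For reference, the likely()/unlikely() hints are thin wrappers around GCC's
__builtin_expect(). The kernel defines them in include/linux/compiler.h
roughly as:

	/* Branch-prediction hints: tell the compiler which outcome is the
	 * common case, so it can lay out the hot path as fall-through code. */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

The !!(x) normalizes any nonzero value to 1, and the second argument is the
expected result. With that hint the compiler can keep the common path free of
taken branches and move the rare path (error and corner cases, as in the
hunks below) out of line.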
@@ -82,13 +82,12 @@ static int f2fs_write_meta_page(struct page *page,
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

 	/* Should not write any meta pages, if any IO error was occurred */
-	if (wbc->for_reclaim || sbi->por_doing ||
-			is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
-		dec_page_count(sbi, F2FS_DIRTY_META);
-		wbc->pages_skipped++;
-		set_page_dirty(page);
-		return AOP_WRITEPAGE_ACTIVATE;
-	}
+	if (unlikely(sbi->por_doing ||
+			is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+		goto redirty_out;
+
+	if (wbc->for_reclaim)
+		goto redirty_out;

 	wait_on_page_writeback(page);
@@ -96,6 +95,12 @@ static int f2fs_write_meta_page(struct page *page,
 	dec_page_count(sbi, F2FS_DIRTY_META);
 	unlock_page(page);
 	return 0;
+
+redirty_out:
+	dec_page_count(sbi, F2FS_DIRTY_META);
+	wbc->pages_skipped++;
+	set_page_dirty(page);
+	return AOP_WRITEPAGE_ACTIVATE;
 }

 static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -137,7 +142,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
 				PAGECACHE_TAG_DIRTY,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-		if (nr_pages == 0)
+		if (unlikely(nr_pages == 0))
 			break;

 		for (i = 0; i < nr_pages; i++) {
@@ -150,7 +155,8 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 				unlock_page(page);
 				break;
 			}
-			if (nwritten++ >= nr_to_write)
+			nwritten++;
+			if (unlikely(nwritten >= nr_to_write))
 				break;
 		}
 		pagevec_release(&pvec);
@@ -200,7 +206,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
 	max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
 				* F2FS_ORPHANS_PER_BLOCK;
 	mutex_lock(&sbi->orphan_inode_mutex);
-	if (sbi->n_orphans >= max_orphans)
+	if (unlikely(sbi->n_orphans >= max_orphans))
 		err = -ENOSPC;
 	else
 		sbi->n_orphans++;
...
@@ -251,7 +251,7 @@ int reserve_new_block(struct dnode_of_data *dn)
 	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
 		return -EPERM;
-	if (!inc_valid_block_count(sbi, dn->inode, 1))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
 		return -ENOSPC;

 	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
@@ -711,7 +711,7 @@ static int f2fs_write_data_page(struct page *page,
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 write:
-	if (sbi->por_doing) {
+	if (unlikely(sbi->por_doing)) {
 		err = AOP_WRITEPAGE_ACTIVATE;
 		goto redirty_out;
 	}
...
@@ -190,7 +190,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 	unsigned int max_depth;
 	unsigned int level;

-	if (namelen > F2FS_NAME_LEN)
+	if (unlikely(namelen > F2FS_NAME_LEN))
 		return NULL;

 	if (npages == 0)
@@ -461,7 +461,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
 	}

start:
-	if (current_depth == MAX_DIR_HASH_DEPTH)
+	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
 		return -ENOSPC;

 	/* Increase the depth, if required */
...
@@ -574,7 +574,7 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
 	WARN_ON((nid >= NM_I(sbi)->max_nid));
-	if (nid >= NM_I(sbi)->max_nid)
+	if (unlikely(nid >= NM_I(sbi)->max_nid))
 		return -EINVAL;
 	return 0;
 }
@@ -600,7 +600,7 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
 	spin_lock(&sbi->stat_lock);
 	valid_block_count =
 		sbi->total_valid_block_count + (block_t)count;
-	if (valid_block_count > sbi->user_block_count) {
+	if (unlikely(valid_block_count > sbi->user_block_count)) {
 		spin_unlock(&sbi->stat_lock);
 		return false;
 	}
@@ -719,13 +719,13 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
 	spin_lock(&sbi->stat_lock);

 	valid_block_count = sbi->total_valid_block_count + 1;
-	if (valid_block_count > sbi->user_block_count) {
+	if (unlikely(valid_block_count > sbi->user_block_count)) {
 		spin_unlock(&sbi->stat_lock);
 		return false;
 	}

 	valid_node_count = sbi->total_valid_node_count + 1;
-	if (valid_node_count > sbi->total_node_count) {
+	if (unlikely(valid_node_count > sbi->total_node_count)) {
 		spin_unlock(&sbi->stat_lock);
 		return false;
 	}
...
@@ -94,7 +94,7 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
 	int i;

 	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
-		if (nid >= nm_i->max_nid)
+		if (unlikely(nid >= nm_i->max_nid))
 			nid = 0;

 		index = current_nat_addr(sbi, nid);
@@ -1160,7 +1160,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
 			struct page *page = pvec.pages[i];

 			/* until radix tree lookup accepts end_index */
-			if (page->index > end)
+			if (unlikely(page->index > end))
 				continue;

 			if (ino && ino_of_node(page) == ino) {
@@ -1190,7 +1190,7 @@ static int f2fs_write_node_page(struct page *page,
 	block_t new_addr;
 	struct node_info ni;

-	if (sbi->por_doing)
+	if (unlikely(sbi->por_doing))
 		goto redirty_out;

 	wait_on_page_writeback(page);
@@ -1326,7 +1326,7 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 		return -1;

 	/* 0 nid should not be used */
-	if (nid == 0)
+	if (unlikely(nid == 0))
 		return 0;

 	if (build) {
@@ -1379,7 +1379,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
-		if (start_nid >= nm_i->max_nid)
+		if (unlikely(start_nid >= nm_i->max_nid))
 			break;

 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
@@ -1413,7 +1413,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 		f2fs_put_page(page, 1);

 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
-		if (nid >= nm_i->max_nid)
+		if (unlikely(nid >= nm_i->max_nid))
 			nid = 0;

 		if (i++ == FREE_NID_PAGES)
@@ -1447,7 +1447,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 	struct free_nid *i = NULL;
 	struct list_head *this;
retry:
-	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
+	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
 		return false;

 	spin_lock(&nm_i->free_nid_list_lock);
@@ -1557,7 +1557,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	new_ni = old_ni;
 	new_ni.ino = ino;

-	if (!inc_valid_node_count(sbi, NULL))
+	if (unlikely(!inc_valid_node_count(sbi, NULL)))
 		WARN_ON(1);
 	set_node_addr(sbi, &new_ni, NEW_ADDR);
 	inc_valid_inode_count(sbi);
...
@@ -457,7 +457,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

-	if (sbi->por_doing)
+	if (unlikely(sbi->por_doing))
 		return false;

 	return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
...
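As a minimal standalone sketch of the pattern this patch applies
(illustrative only, not f2fs code; process() and err_flag are hypothetical
names):

	#include <stdio.h>

	/* Same definition the kernel uses for its unlikely() hint. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* err_flag is expected to be zero almost always, so mark the
	 * error branch as the cold path. */
	static int process(int err_flag)
	{
		if (unlikely(err_flag))
			return -1;	/* rare error path */
		return 0;		/* common path, fall-through */
	}

	int main(void)
	{
		printf("process(0) = %d\n", process(0));
		return 0;
	}

Building with gcc -O2 -S and diffing the assembly with and without the hint
typically shows the error return moved out of the straight-line path.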