Commit ad4edb83 authored by Jaegeuk Kim

f2fs: produce more nids and reduce readahead nats

Readahead nat pages are likely to be reclaimed quickly, so it is better
to gather more free nids in advance.

And let's keep as many free nids cached as possible.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 52763a4b
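
For scale, here is a standalone sketch (illustrative only, not part of the patch) of what the constants changed below work out to, assuming the usual 4KB f2fs block and the 9-byte on-disk NAT entry:

/* Illustrative arithmetic for the free nid reserve (assumes 4KB blocks). */
#include <stdio.h>

#define BLOCK_SIZE          4096  /* assumed f2fs block size */
#define NAT_ENTRY_SIZE      9     /* assumed on-disk struct f2fs_nat_entry size */
#define NAT_ENTRY_PER_BLOCK (BLOCK_SIZE / NAT_ENTRY_SIZE)  /* 455 */

int main(void)
{
    /* FREE_NID_PAGES goes from 4 to 8 in this patch */
    printf("old shrink floor: %d nids\n", NAT_ENTRY_PER_BLOCK);      /* 455 */
    printf("new reserve (MAX_FREE_NIDS): %d nids\n",
           NAT_ENTRY_PER_BLOCK * 8);                                 /* 3640 */
    return 0;
}

Under these assumptions the pool of free nids that shrinking leaves untouched grows roughly eightfold, from one NAT block's worth to eight.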
fs/f2fs/checkpoint.c
@@ -941,6 +941,8 @@ static int block_operations(struct f2fs_sb_info *sbi)
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
 	up_write(&sbi->node_write);
+
+	build_free_nids(sbi);
 	f2fs_unlock_all(sbi);
 }
fs/f2fs/f2fs.h
@@ -1965,6 +1965,7 @@ void move_node_page(struct page *, int);
 int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
 			struct writeback_control *, bool);
 int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
+void build_free_nids(struct f2fs_sb_info *);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
fs/f2fs/node.c
@@ -1765,7 +1765,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 	}
 }

-static void build_free_nids(struct f2fs_sb_info *sbi)
+void build_free_nids(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1774,7 +1774,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 	nid_t nid = nm_i->next_scan_nid;

 	/* Enough entries */
-	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+	if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
 		return;

 	/* readahead nat pages to be scanned */
@@ -1912,12 +1912,15 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct free_nid *i, *next;
 	int nr = nr_shrink;

+	if (nm_i->fcnt <= MAX_FREE_NIDS)
+		return 0;
+
 	if (!mutex_trylock(&nm_i->build_lock))
 		return 0;

 	spin_lock(&nm_i->free_nid_list_lock);
 	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
-		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+		if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
 			break;
 		if (i->state == NID_ALLOC)
 			continue;
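
A userspace sketch of the shrink policy above (hypothetical stand-ins, not kernel code; the entry count assumes 4KB blocks). The two changed checks are: bail out early when the cache is already at or under the reserve, and stop scanning as soon as shrinking would dip below MAX_FREE_NIDS rather than below a single block's worth:

/* Sketch of the new shrink policy in try_to_free_nids() (illustrative only). */
#include <stdio.h>

#define NAT_ENTRY_PER_BLOCK 455   /* assumed: 4096-byte block / 9-byte entry */
#define FREE_NID_PAGES      8
#define MAX_FREE_NIDS       (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

static int try_to_free_nids_sketch(int *fcnt, int nr_shrink)
{
    int nr = nr_shrink;

    if (*fcnt <= MAX_FREE_NIDS)   /* new early exit: never dig into the reserve */
        return 0;

    while (nr_shrink > 0 && *fcnt > MAX_FREE_NIDS) {
        (*fcnt)--;                /* release one cached free nid */
        nr_shrink--;
    }
    return nr - nr_shrink;        /* number of nids released */
}

int main(void)
{
    int fcnt = 4000;
    int freed = try_to_free_nids_sketch(&fcnt, NAT_ENTRY_PER_BLOCK);

    printf("released %d nids, %d still cached\n", freed, fcnt);  /* 360, 3640 */
    return 0;
}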
fs/f2fs/node.h
@@ -15,9 +15,10 @@
 #define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)

 /* # of pages to perform synchronous readahead before building free nids */
-#define FREE_NID_PAGES	4
+#define FREE_NID_PAGES	8
+#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

-#define DEF_RA_NID_PAGES	4	/* # of nid pages to be readaheaded */
+#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

 /* maximum readahead size for node during getting data blocks */
 #define MAX_RA_NODE		128
fs/f2fs/segment.c
@@ -371,7 +371,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

 	if (!available_free_memory(sbi, FREE_NIDS))
-		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+		try_to_free_nids(sbi, MAX_FREE_NIDS);
+	else
+		build_free_nids(sbi);

 	/* checkpoint is the only way to shrink partial cached entries */
 	if (!available_free_memory(sbi, NAT_ENTRIES) ||
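
Background balancing now has two sides: when free nid memory is tight, trim the cache toward the reserve; otherwise use the idle moment to top the pool up. A toy sketch of that decision (hypothetical stand-ins for available_free_memory(), try_to_free_nids() and build_free_nids(); the reserve value assumes 4KB blocks):

/* Toy model of the f2fs_balance_fs_bg() decision (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_FREE_NIDS 3640	/* assumed: 8 NAT blocks' worth of free nids */

static bool memory_tight;	/* toy stand-in for !available_free_memory() */

static void balance_free_nids(void)
{
    if (memory_tight)
        printf("trim free nid cache toward %d entries\n", MAX_FREE_NIDS);
    else
        printf("memory is fine: pre-build more free nids\n");
}

int main(void)
{
    memory_tight = false;
    balance_free_nids();	/* idle path: top the pool up */
    memory_tight = true;
    balance_free_nids();	/* pressure path: shrink toward the reserve */
    return 0;
}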
fs/f2fs/shrinker.c
@@ -13,6 +13,7 @@
 #include <linux/f2fs_fs.h>

 #include "f2fs.h"
+#include "node.h"

 static LIST_HEAD(f2fs_list);
 static DEFINE_SPINLOCK(f2fs_list_lock);
@@ -25,8 +26,8 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)

 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
 {
-	if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
-		return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+	if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
+		return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
 	return 0;
 }
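
With this last hunk the slab shrinker only advertises free nids above the reserve as reclaimable, so memory pressure can never empty the pool the rest of the patch builds up. A tiny sketch (illustrative only; the reserve value assumes 4KB blocks):

/* Sketch of the reclaimable free nid count (not kernel code). */
#include <stdio.h>

#define MAX_FREE_NIDS 3640	/* assumed: NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES */

static unsigned long count_free_nids_sketch(unsigned long fcnt)
{
    /* only the excess over the reserve is visible to the shrinker */
    return fcnt > MAX_FREE_NIDS ? fcnt - MAX_FREE_NIDS : 0;
}

int main(void)
{
    printf("%lu\n", count_free_nids_sketch(4000));	/* 360 reclaimable */
    printf("%lu\n", count_free_nids_sketch(3000));	/* 0: keep the reserve */
    return 0;
}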