Commit 4c1fad64 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've investigated how f2fs deals with errors given by
  our fault injection facility. With this, we could fix several corner
  cases. And, in order to improve the performance, we set inline_dentry
  by default and enhance the exisiting discard issue flow. In addition,
  we added f2fs_migrate_page for better memory management.

  Enhancements:
   - set inline_dentry by default
   - improve discard issue flow
   - add more fault injection cases in f2fs
   - allow block preallocation for encrypted files
   - introduce migrate_page callback function
   - avoid truncating the next direct node block at every checkpoint

  Bug fixes:
   - set page flag correctly between write_begin and write_end
   - missing error handling cases detected by fault injection
   - preallocate blocks regarding to 4KB alignement correctly
   - dentry and filename handling of encryption
   - lost xattrs of directories"

* tag 'for-f2fs-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (69 commits)
  f2fs: introduce update_ckpt_flags to clean up
  f2fs: don't submit irrelevant page
  f2fs: fix to commit bio cache after flushing node pages
  f2fs: introduce get_checkpoint_version for cleanup
  f2fs: remove dead variable
  f2fs: remove redundant io plug
  f2fs: support checkpoint error injection
  f2fs: fix to recover old fault injection config in ->remount_fs
  f2fs: do fault injection initialization in default_options
  f2fs: remove redundant value definition
  f2fs: support configuring fault injection per superblock
  f2fs: adjust display format of segment bit
  f2fs: remove dirty inode pages in error path
  f2fs: do not unnecessarily null-terminate encrypted symlink data
  f2fs: handle errors during recover_orphan_inodes
  f2fs: avoid gc in cp_error case
  f2fs: should put_page for summary page
  f2fs: assign return value in f2fs_gc
  f2fs: add customized migrate_page callback
  f2fs: introduce cp_lock to protect updating of ckpt_flags
  ...
parents 0fb3ca44 e4c5d848
...@@ -131,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created ...@@ -131,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The directory entries can be written into inode block. The
space of inode block which is used to store inline space of inode block which is used to store inline
dentries is limited to ~3.4k. dentries is limited to ~3.4k.
noinline_dentry Diable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly, device handles the cache_flush command relatively slowly,
......
...@@ -5105,10 +5105,9 @@ F: include/linux/fscrypto.h ...@@ -5105,10 +5105,9 @@ F: include/linux/fscrypto.h
F2FS FILE SYSTEM F2FS FILE SYSTEM
M: Jaegeuk Kim <jaegeuk@kernel.org> M: Jaegeuk Kim <jaegeuk@kernel.org>
M: Changman Lee <cm224.lee@samsung.com> M: Chao Yu <yuchao0@huawei.com>
R: Chao Yu <yuchao0@huawei.com>
L: linux-f2fs-devel@lists.sourceforge.net L: linux-f2fs-devel@lists.sourceforge.net
W: http://en.wikipedia.org/wiki/F2FS W: https://f2fs.wiki.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
S: Maintained S: Maintained
F: Documentation/filesystems/f2fs.txt F: Documentation/filesystems/f2fs.txt
......
...@@ -109,14 +109,16 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size) ...@@ -109,14 +109,16 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size) static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
const struct posix_acl *acl, size_t *size)
{ {
struct f2fs_acl_header *f2fs_acl; struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry; struct f2fs_acl_entry *entry;
int i; int i;
f2fs_acl = f2fs_kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count * f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
sizeof(struct f2fs_acl_entry), GFP_NOFS); acl->a_count * sizeof(struct f2fs_acl_entry),
GFP_NOFS);
if (!f2fs_acl) if (!f2fs_acl)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type, ...@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage); retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) { if (retval > 0) {
value = f2fs_kmalloc(retval, GFP_F2FS_ZERO); value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value) if (!value)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value, retval = f2fs_getxattr(inode, name_index, "", value,
...@@ -230,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, ...@@ -230,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
} }
if (acl) { if (acl) {
value = f2fs_acl_to_disk(acl, &size); value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) { if (IS_ERR(value)) {
clear_inode_flag(inode, FI_ACL_MODE); clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value); return (int)PTR_ERR(value);
......
...@@ -41,7 +41,6 @@ extern int f2fs_set_acl(struct inode *, struct posix_acl *, int); ...@@ -41,7 +41,6 @@ extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *, extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *); struct page *);
#else #else
#define f2fs_check_acl NULL
#define f2fs_get_acl NULL #define f2fs_get_acl NULL
#define f2fs_set_acl NULL #define f2fs_set_acl NULL
......
...@@ -28,7 +28,7 @@ struct kmem_cache *inode_entry_slab; ...@@ -28,7 +28,7 @@ struct kmem_cache *inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{ {
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); set_ckpt_flags(sbi, CP_ERROR_FLAG);
sbi->sb->s_flags |= MS_RDONLY; sbi->sb->s_flags |= MS_RDONLY;
if (!end_io) if (!end_io)
f2fs_flush_merged_bios(sbi); f2fs_flush_merged_bios(sbi);
...@@ -267,7 +267,6 @@ static int f2fs_write_meta_pages(struct address_space *mapping, ...@@ -267,7 +267,6 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct blk_plug plug;
long diff, written; long diff, written;
/* collect a number of dirty meta pages and write together */ /* collect a number of dirty meta pages and write together */
...@@ -280,9 +279,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping, ...@@ -280,9 +279,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
/* if mounting is failed, skip writing node pages */ /* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex); mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc); diff = nr_pages_to_write(sbi, META, wbc);
blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write); written = sync_meta_pages(sbi, META, wbc->nr_to_write);
blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex); mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0; return 0;
...@@ -388,6 +385,9 @@ const struct address_space_operations f2fs_meta_aops = { ...@@ -388,6 +385,9 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty, .set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page, .invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page, .releasepage = f2fs_release_page,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
}; };
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
...@@ -491,7 +491,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi) ...@@ -491,7 +491,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
spin_lock(&im->ino_lock); spin_lock(&im->ino_lock);
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_ORPHAN)) { if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock); spin_unlock(&im->ino_lock);
return -ENOSPC; return -ENOSPC;
} }
...@@ -531,8 +531,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) ...@@ -531,8 +531,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{ {
struct inode *inode; struct inode *inode;
struct node_info ni;
int err = acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: orphan failed (ino=%x), run fsck to fix.",
__func__, ino);
return err;
}
inode = f2fs_iget(sbi->sb, ino); __add_ino_entry(sbi, ino, ORPHAN_INO);
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) { if (IS_ERR(inode)) {
/* /*
* there should be a bug that we can't find the entry * there should be a bug that we can't find the entry
...@@ -546,6 +558,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) ...@@ -546,6 +558,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */ /* truncate all the data during iput */
iput(inode); iput(inode);
get_node_info(sbi, ino, &ni);
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: orphan failed (ino=%x), run fsck to fix.",
__func__, ino);
return -EIO;
}
__remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0; return 0;
} }
...@@ -554,7 +578,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) ...@@ -554,7 +578,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j; block_t start_blk, orphan_blocks, i, j;
int err; int err;
if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0; return 0;
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
...@@ -578,7 +602,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) ...@@ -578,7 +602,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
} }
/* clear Orphan Flag */ /* clear Orphan Flag */
clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
return 0; return 0;
} }
...@@ -639,45 +663,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) ...@@ -639,45 +663,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
} }
} }
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
block_t cp_addr, unsigned long long *version) struct f2fs_checkpoint **cp_block, struct page **cp_page,
unsigned long long *version)
{ {
struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize; unsigned long blk_size = sbi->blocksize;
struct f2fs_checkpoint *cp_block; size_t crc_offset = 0;
unsigned long long cur_version = 0, pre_version = 0;
size_t crc_offset;
__u32 crc = 0; __u32 crc = 0;
/* Read the 1st cp block in this CP pack */ *cp_page = get_meta_page(sbi, cp_addr);
cp_page_1 = get_meta_page(sbi, cp_addr); *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
/* get the version number */ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1); if (crc_offset >= blk_size) {
crc_offset = le32_to_cpu(cp_block->checksum_offset); f2fs_msg(sbi->sb, KERN_WARNING,
if (crc_offset >= blk_size) "invalid crc_offset: %zu", crc_offset);
goto invalid_cp1; return -EINVAL;
}
crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset))); crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset)) + crc_offset)));
goto invalid_cp1; if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL;
}
pre_version = cur_cp_version(cp_block); *version = cur_cp_version(*cp_block);
return 0;
}
/* Read the 2nd cp block in this CP pack */ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; block_t cp_addr, unsigned long long *version)
cp_page_2 = get_meta_page(sbi, cp_addr); {
struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
struct f2fs_checkpoint *cp_block = NULL;
unsigned long long cur_version = 0, pre_version = 0;
int err;
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2); err = get_checkpoint_version(sbi, cp_addr, &cp_block,
crc_offset = le32_to_cpu(cp_block->checksum_offset); &cp_page_1, version);
if (crc_offset >= blk_size) if (err)
goto invalid_cp2; goto invalid_cp1;
pre_version = *version;
crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset))); cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
if (!f2fs_crc_valid(sbi, crc, cp_block, crc_offset)) err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_2, version);
if (err)
goto invalid_cp2; goto invalid_cp2;
cur_version = *version;
cur_version = cur_cp_version(cp_block);
if (cur_version == pre_version) { if (cur_version == pre_version) {
*version = cur_version; *version = cur_version;
...@@ -972,10 +1006,40 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) ...@@ -972,10 +1006,40 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
finish_wait(&sbi->cp_wait, &wait); finish_wait(&sbi->cp_wait, &wait);
} }
static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
spin_lock(&sbi->cp_lock);
if (cpc->reason == CP_UMOUNT)
__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
if (cpc->reason == CP_FASTBOOT)
__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
if (orphan_num)
__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
spin_unlock(&sbi->cp_lock);
}
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{ {
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid; nid_t last_nid = nm_i->next_scan_nid;
...@@ -984,19 +1048,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -984,19 +1048,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0; __u32 crc32 = 0;
int i; int i;
int cp_payload_blks = __cp_payload(sbi); int cp_payload_blks = __cp_payload(sbi);
block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
bool invalidate = false;
struct super_block *sb = sbi->sb; struct super_block *sb = sbi->sb;
struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
u64 kbytes_written; u64 kbytes_written;
/*
* This avoids to conduct wrong roll-forward operations and uses
* metapages, so should be called prior to sync_meta_pages below.
*/
if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
invalidate = true;
/* Flush all the NAT/SIT pages */ /* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) { while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX); sync_meta_pages(sbi, META, LONG_MAX);
...@@ -1036,10 +1091,12 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1036,10 +1091,12 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* 2 cp + n data seg summary + orphan inode blocks */ /* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false); data_sum_blocks = npages_for_summary_flush(sbi, false);
spin_lock(&sbi->cp_lock);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE) if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else else
clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
spin_unlock(&sbi->cp_lock);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num); orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
...@@ -1054,23 +1111,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1054,23 +1111,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_payload_blks + data_sum_blocks + cp_payload_blks + data_sum_blocks +
orphan_blocks); orphan_blocks);
if (cpc->reason == CP_UMOUNT) /* update ckpt flag for checkpoint */
set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); update_ckpt_flags(sbi, cpc);
else
clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
if (cpc->reason == CP_FASTBOOT)
set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
else
clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
if (orphan_num)
set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
set_ckpt_flags(ckpt, CP_FSCK_FLAG);
/* update SIT/NAT bitmap */ /* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
...@@ -1137,14 +1179,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1137,14 +1179,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */ /* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi); wait_on_all_pages_writeback(sbi);
/*
* invalidate meta page which is used temporarily for zeroing out
* block at the end of warm node chain.
*/
if (invalidate)
invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
discard_blk);
release_ino_entry(sbi, false); release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi))) if (unlikely(f2fs_cp_error(sbi)))
...@@ -1152,6 +1186,17 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1152,6 +1186,17 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_prefree_segments(sbi, cpc); clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY); clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
/*
* redirty superblock if metadata like node page or inode cache is
* updated during writing checkpoint.
*/
if (get_pages(sbi, F2FS_DIRTY_NODES) ||
get_pages(sbi, F2FS_DIRTY_IMETA))
set_sbi_flag(sbi, SBI_IS_DIRTY);
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
return 0; return 0;
} }
...@@ -1190,6 +1235,18 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1190,6 +1235,18 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_merged_bios(sbi); f2fs_flush_merged_bios(sbi);
/* this is the case of multiple fstrims without any changes */
if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
f2fs_bug_on(sbi, prefree_segments(sbi));
flush_sit_entries(sbi, cpc);
clear_prefree_segments(sbi, cpc);
f2fs_wait_all_discard_bio(sbi);
unblock_operations(sbi);
goto out;
}
/* /*
* update checkpoint pack index * update checkpoint pack index
* Increase the version number so that * Increase the version number so that
...@@ -1205,6 +1262,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1205,6 +1262,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* unlock all the fs_lock[] in do_checkpoint() */ /* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc); err = do_checkpoint(sbi, cpc);
f2fs_wait_all_discard_bio(sbi);
unblock_operations(sbi); unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info); stat_inc_cp_count(sbi->stat_info);
......
...@@ -34,6 +34,11 @@ static void f2fs_read_end_io(struct bio *bio) ...@@ -34,6 +34,11 @@ static void f2fs_read_end_io(struct bio *bio)
struct bio_vec *bvec; struct bio_vec *bvec;
int i; int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
bio->bi_error = -EIO;
#endif
if (f2fs_bio_encrypted(bio)) { if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) { if (bio->bi_error) {
fscrypt_release_ctx(bio->bi_private); fscrypt_release_ctx(bio->bi_private);
...@@ -626,11 +631,13 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) ...@@ -626,11 +631,13 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret = 0; ssize_t ret = 0;
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from)); map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
map.m_next_pgofs = NULL; if (map.m_len > map.m_lblk)
map.m_len -= map.m_lblk;
else
map.m_len = 0;
if (f2fs_encrypted_inode(inode)) map.m_next_pgofs = NULL;
return 0;
if (iocb->ki_flags & IOCB_DIRECT) { if (iocb->ki_flags & IOCB_DIRECT) {
ret = f2fs_convert_inline_inode(inode); ret = f2fs_convert_inline_inode(inode);
...@@ -672,6 +679,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, ...@@ -672,6 +679,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
bool allocated = false; bool allocated = false;
block_t blkaddr; block_t blkaddr;
if (!maxblocks)
return 0;
map->m_len = 0; map->m_len = 0;
map->m_flags = 0; map->m_flags = 0;
...@@ -783,6 +793,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, ...@@ -783,6 +793,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
err = reserve_new_blocks(&dn, prealloc); err = reserve_new_blocks(&dn, prealloc);
if (err) if (err)
goto sync_out; goto sync_out;
allocated = dn.node_changed;
map->m_len += dn.ofs_in_node - ofs_in_node; map->m_len += dn.ofs_in_node - ofs_in_node;
if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
...@@ -966,8 +977,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, ...@@ -966,8 +977,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret; return ret;
} }
struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr, static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages) unsigned nr_pages)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct fscrypt_ctx *ctx = NULL; struct fscrypt_ctx *ctx = NULL;
...@@ -1284,7 +1295,7 @@ static int f2fs_write_data_page(struct page *page, ...@@ -1284,7 +1295,7 @@ static int f2fs_write_data_page(struct page *page,
if (!wbc->for_reclaim) if (!wbc->for_reclaim)
need_balance_fs = true; need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0)) else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out; goto redirty_out;
err = -EAGAIN; err = -EAGAIN;
...@@ -1344,6 +1355,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping, ...@@ -1344,6 +1355,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled; int cycled;
int range_whole = 0; int range_whole = 0;
int tag; int tag;
int nwritten = 0;
pagevec_init(&pvec, 0); pagevec_init(&pvec, 0);
...@@ -1418,6 +1430,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping, ...@@ -1418,6 +1430,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
done_index = page->index + 1; done_index = page->index + 1;
done = 1; done = 1;
break; break;
} else {
nwritten++;
} }
if (--wbc->nr_to_write <= 0 && if (--wbc->nr_to_write <= 0 &&
...@@ -1439,6 +1453,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping, ...@@ -1439,6 +1453,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index; mapping->writeback_index = done_index;
if (nwritten)
f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
NULL, 0, DATA, WRITE);
return ret; return ret;
} }
...@@ -1480,7 +1498,6 @@ static int f2fs_write_data_pages(struct address_space *mapping, ...@@ -1480,7 +1498,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
* if some pages were truncated, we cannot guarantee its mapping->host * if some pages were truncated, we cannot guarantee its mapping->host
* to detect pending bios. * to detect pending bios.
*/ */
f2fs_submit_merged_bio(sbi, DATA, WRITE);
remove_dirty_inode(inode); remove_dirty_inode(inode);
return ret; return ret;
...@@ -1518,8 +1535,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, ...@@ -1518,8 +1535,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
* we already allocated all the blocks, so we don't need to get * we already allocated all the blocks, so we don't need to get
* the block addresses when there is no need to fill the page. * the block addresses when there is no need to fill the page.
*/ */
if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) && if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
len == PAGE_SIZE)
return 0; return 0;
if (f2fs_has_inline_data(inode) || if (f2fs_has_inline_data(inode) ||
...@@ -1616,7 +1632,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, ...@@ -1616,7 +1632,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (err) if (err)
goto fail; goto fail;
if (need_balance && has_not_enough_free_secs(sbi, 0)) { if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page); unlock_page(page);
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
lock_page(page); lock_page(page);
...@@ -1633,22 +1649,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, ...@@ -1633,22 +1649,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
if (len == PAGE_SIZE) if (len == PAGE_SIZE || PageUptodate(page))
goto out_update; return 0;
if (PageUptodate(page))
goto out_clear;
if ((pos & PAGE_MASK) >= i_size_read(inode)) {
unsigned start = pos & (PAGE_SIZE - 1);
unsigned end = start + len;
/* Reading beyond i_size is simple: memset to zero */
zero_user_segments(page, 0, start, end, PAGE_SIZE);
goto out_update;
}
if (blkaddr == NEW_ADDR) { if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE); zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else { } else {
struct bio *bio; struct bio *bio;
...@@ -1676,11 +1682,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, ...@@ -1676,11 +1682,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
goto fail; goto fail;
} }
} }
out_update:
if (!PageUptodate(page))
SetPageUptodate(page);
out_clear:
clear_cold_data(page);
return 0; return 0;
fail: fail:
...@@ -1698,11 +1699,26 @@ static int f2fs_write_end(struct file *file, ...@@ -1698,11 +1699,26 @@ static int f2fs_write_end(struct file *file,
trace_f2fs_write_end(inode, pos, len, copied); trace_f2fs_write_end(inode, pos, len, copied);
/*
* This should be come from len == PAGE_SIZE, and we expect copied
* should be PAGE_SIZE. Otherwise, we treat it with zero copied and
* let generic_perform_write() try to copy data again through copied=0.
*/
if (!PageUptodate(page)) {
if (unlikely(copied != PAGE_SIZE))
copied = 0;
else
SetPageUptodate(page);
}
if (!copied)
goto unlock_out;
set_page_dirty(page); set_page_dirty(page);
clear_cold_data(page);
if (pos + copied > i_size_read(inode)) if (pos + copied > i_size_read(inode))
f2fs_i_size_write(inode, pos + copied); f2fs_i_size_write(inode, pos + copied);
unlock_out:
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied; return copied;
...@@ -1873,6 +1889,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) ...@@ -1873,6 +1889,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block_bmap); return generic_block_bmap(mapping, block, get_data_block_bmap);
} }
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>
int f2fs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
int rc, extra_count;
struct f2fs_inode_info *fi = F2FS_I(mapping->host);
bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
BUG_ON(PageWriteback(page));
/* migrating an atomic written page is safe with the inmem_lock hold */
if (atomic_written && !mutex_trylock(&fi->inmem_lock))
return -EAGAIN;
/*
* A reference is expected if PagePrivate set when move mapping,
* however F2FS breaks this for maintaining dirty page counts when
* truncating pages. So here adjusting the 'extra_count' make it work.
*/
extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
rc = migrate_page_move_mapping(mapping, newpage,
page, NULL, mode, extra_count);
if (rc != MIGRATEPAGE_SUCCESS) {
if (atomic_written)
mutex_unlock(&fi->inmem_lock);
return rc;
}
if (atomic_written) {
struct inmem_pages *cur;
list_for_each_entry(cur, &fi->inmem_pages, list)
if (cur->page == page) {
cur->page = newpage;
break;
}
mutex_unlock(&fi->inmem_lock);
put_page(page);
get_page(newpage);
}
if (PagePrivate(page))
SetPagePrivate(newpage);
set_page_private(newpage, page_private(page));
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = { const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page, .readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages, .readpages = f2fs_read_data_pages,
...@@ -1885,4 +1953,7 @@ const struct address_space_operations f2fs_dblock_aops = { ...@@ -1885,4 +1953,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.releasepage = f2fs_release_page, .releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO, .direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap, .bmap = f2fs_bmap,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
}; };
...@@ -45,6 +45,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) ...@@ -45,6 +45,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA); si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
...@@ -54,6 +55,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) ...@@ -54,6 +55,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->rsvd_segs = reserved_segments(sbi); si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi); si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi); si->valid_count = valid_user_blocks(sbi);
si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi); si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi); si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr); si->inline_xattr = atomic_read(&sbi->inline_xattr);
...@@ -154,7 +156,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi) ...@@ -154,7 +156,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info); si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry); si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi)); si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi); si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
if (f2fs_discard_en(sbi))
si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE; si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1) if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry); si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
...@@ -228,8 +232,13 @@ static int stat_show(struct seq_file *s, void *v) ...@@ -228,8 +232,13 @@ static int stat_show(struct seq_file *s, void *v)
si->ssa_area_segs, si->main_area_segs); si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs); si->overp_segs, si->rsvd_segs);
seq_printf(s, "Utilization: %d%% (%d valid blocks)\n", if (test_opt(si->sbi, DISCARD))
si->utilization, si->valid_count); seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
si->utilization, si->valid_count, si->discard_blks);
else
seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
si->utilization, si->valid_count);
seq_printf(s, " - Node: %u (Inode: %u, ", seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count); si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n", seq_printf(s, "Other: %u)\n - Data: %u\n",
...@@ -311,6 +320,8 @@ static int stat_show(struct seq_file *s, void *v) ...@@ -311,6 +320,8 @@ static int stat_show(struct seq_file *s, void *v)
si->ndirty_data, si->ndirty_files); si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4lld in %4d\n", seq_printf(s, " - meta: %4lld in %4d\n",
si->ndirty_meta, si->meta_pages); si->ndirty_meta, si->meta_pages);
seq_printf(s, " - imeta: %4lld\n",
si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n", seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits); si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d\n", seq_printf(s, " - free_nids: %9d\n",
......
...@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level) ...@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level)
return 4; return 4;
} }
unsigned char f2fs_filetype_table[F2FS_FT_MAX] = { static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN, [F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG, [F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR, [F2FS_FT_DIR] = DT_DIR,
...@@ -172,7 +172,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, ...@@ -172,7 +172,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
int max_slots; int max_slots;
f2fs_hash_t namehash; f2fs_hash_t namehash;
namehash = f2fs_dentry_hash(&name); if(fname->hash)
namehash = cpu_to_le32(fname->hash);
else
namehash = f2fs_dentry_hash(&name);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level); nblock = bucket_blocks(level);
...@@ -212,31 +215,17 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, ...@@ -212,31 +215,17 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
return de; return de;
} }
/* struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
* Find an entry in the specified directory with the wanted name. struct fscrypt_name *fname, struct page **res_page)
* It returns the page where the entry was found (as a parameter - res_page),
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
{ {
unsigned long npages = dir_blocks(dir); unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL; struct f2fs_dir_entry *de = NULL;
unsigned int max_depth; unsigned int max_depth;
unsigned int level; unsigned int level;
struct fscrypt_name fname;
int err;
err = fscrypt_setup_filename(dir, child, 1, &fname);
if (err) {
*res_page = ERR_PTR(err);
return NULL;
}
if (f2fs_has_inline_dentry(dir)) { if (f2fs_has_inline_dentry(dir)) {
*res_page = NULL; *res_page = NULL;
de = find_in_inline_dir(dir, &fname, res_page); de = find_in_inline_dir(dir, fname, res_page);
goto out; goto out;
} }
...@@ -256,11 +245,35 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, ...@@ -256,11 +245,35 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
for (level = 0; level < max_depth; level++) { for (level = 0; level < max_depth; level++) {
*res_page = NULL; *res_page = NULL;
de = find_in_level(dir, level, &fname, res_page); de = find_in_level(dir, level, fname, res_page);
if (de || IS_ERR(*res_page)) if (de || IS_ERR(*res_page))
break; break;
} }
out: out:
return de;
}
/*
* Find an entry in the specified directory with the wanted name.
* It returns the page where the entry was found (as a parameter - res_page),
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
{
struct f2fs_dir_entry *de = NULL;
struct fscrypt_name fname;
int err;
err = fscrypt_setup_filename(dir, child, 1, &fname);
if (err) {
*res_page = ERR_PTR(err);
return NULL;
}
de = __f2fs_find_entry(dir, &fname, res_page);
fscrypt_free_filename(&fname); fscrypt_free_filename(&fname);
return de; return de;
} }
...@@ -375,7 +388,8 @@ static int make_empty_dir(struct inode *inode, ...@@ -375,7 +388,8 @@ static int make_empty_dir(struct inode *inode,
} }
struct page *init_inode_metadata(struct inode *inode, struct inode *dir, struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
const struct qstr *name, struct page *dpage) const struct qstr *new_name, const struct qstr *orig_name,
struct page *dpage)
{ {
struct page *page; struct page *page;
int err; int err;
...@@ -400,7 +414,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, ...@@ -400,7 +414,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
if (err) if (err)
goto put_error; goto put_error;
err = f2fs_init_security(inode, dir, name, page); err = f2fs_init_security(inode, dir, orig_name, page);
if (err) if (err)
goto put_error; goto put_error;
...@@ -417,8 +431,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, ...@@ -417,8 +431,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page); set_cold_node(inode, page);
} }
if (name) if (new_name)
init_dent_inode(name, page); init_dent_inode(new_name, page);
/* /*
* This file should be checkpointed during fsync. * This file should be checkpointed during fsync.
...@@ -496,7 +510,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, ...@@ -496,7 +510,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
de->ino = cpu_to_le32(ino); de->ino = cpu_to_le32(ino);
set_de_type(de, mode); set_de_type(de, mode);
for (i = 0; i < slots; i++) { for (i = 0; i < slots; i++) {
test_and_set_bit_le(bit_pos + i, (void *)d->bitmap); __set_bit_le(bit_pos + i, (void *)d->bitmap);
/* avoid wrong garbage data for readdir */ /* avoid wrong garbage data for readdir */
if (i) if (i)
(de + i)->name_len = 0; (de + i)->name_len = 0;
...@@ -504,6 +518,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, ...@@ -504,6 +518,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
} }
int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode) struct inode *inode, nid_t ino, umode_t mode)
{ {
unsigned int bit_pos; unsigned int bit_pos;
...@@ -530,7 +545,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, ...@@ -530,7 +545,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
start: start:
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_DIR_DEPTH)) if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
return -ENOSPC; return -ENOSPC;
#endif #endif
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
...@@ -569,7 +584,8 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, ...@@ -569,7 +584,8 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
if (inode) { if (inode) {
down_write(&F2FS_I(inode)->i_sem); down_write(&F2FS_I(inode)->i_sem);
page = init_inode_metadata(inode, dir, new_name, NULL); page = init_inode_metadata(inode, dir, new_name,
orig_name, NULL);
if (IS_ERR(page)) { if (IS_ERR(page)) {
err = PTR_ERR(page); err = PTR_ERR(page);
goto fail; goto fail;
...@@ -599,6 +615,26 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, ...@@ -599,6 +615,26 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
return err; return err;
} }
int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
struct inode *inode, nid_t ino, umode_t mode)
{
struct qstr new_name;
int err = -EAGAIN;
new_name.name = fname_name(fname);
new_name.len = fname_len(fname);
if (f2fs_has_inline_dentry(dir))
err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
inode, ino, mode);
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
inode, ino, mode);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
}
/* /*
* Caller should grab and release a rwsem by calling f2fs_lock_op() and * Caller should grab and release a rwsem by calling f2fs_lock_op() and
* f2fs_unlock_op(). * f2fs_unlock_op().
...@@ -607,24 +643,15 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, ...@@ -607,24 +643,15 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode) struct inode *inode, nid_t ino, umode_t mode)
{ {
struct fscrypt_name fname; struct fscrypt_name fname;
struct qstr new_name;
int err; int err;
err = fscrypt_setup_filename(dir, name, 0, &fname); err = fscrypt_setup_filename(dir, name, 0, &fname);
if (err) if (err)
return err; return err;
new_name.name = fname_name(&fname); err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
new_name.len = fname_len(&fname);
err = -EAGAIN;
if (f2fs_has_inline_dentry(dir))
err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
fscrypt_free_filename(&fname); fscrypt_free_filename(&fname);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err; return err;
} }
...@@ -634,7 +661,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir) ...@@ -634,7 +661,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0; int err = 0;
down_write(&F2FS_I(inode)->i_sem); down_write(&F2FS_I(inode)->i_sem);
page = init_inode_metadata(inode, dir, NULL, NULL); page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) { if (IS_ERR(page)) {
err = PTR_ERR(page); err = PTR_ERR(page);
goto fail; goto fail;
...@@ -788,16 +815,9 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, ...@@ -788,16 +815,9 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
int save_len = fstr->len; int save_len = fstr->len;
int ret; int ret;
de_name.name = f2fs_kmalloc(de_name.len, GFP_NOFS);
if (!de_name.name)
return false;
memcpy(de_name.name, d->filename[bit_pos], de_name.len);
ret = fscrypt_fname_disk_to_usr(d->inode, ret = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0, (u32)de->hash_code, 0,
&de_name, fstr); &de_name, fstr);
kfree(de_name.name);
if (ret < 0) if (ret < 0)
return true; return true;
......
...@@ -46,6 +46,8 @@ enum { ...@@ -46,6 +46,8 @@ enum {
FAULT_BLOCK, FAULT_BLOCK,
FAULT_DIR_DEPTH, FAULT_DIR_DEPTH,
FAULT_EVICT_INODE, FAULT_EVICT_INODE,
FAULT_IO,
FAULT_CHECKPOINT,
FAULT_MAX, FAULT_MAX,
}; };
...@@ -55,40 +57,8 @@ struct f2fs_fault_info { ...@@ -55,40 +57,8 @@ struct f2fs_fault_info {
unsigned int inject_type; unsigned int inject_type;
}; };
extern struct f2fs_fault_info f2fs_fault;
extern char *fault_name[FAULT_MAX]; extern char *fault_name[FAULT_MAX];
#define IS_FAULT_SET(type) (f2fs_fault.inject_type & (1 << (type))) #define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
static inline bool time_to_inject(int type)
{
if (!f2fs_fault.inject_rate)
return false;
if (type == FAULT_KMALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_PAGE_ALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ALLOC_NID && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ORPHAN && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_BLOCK && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_EVICT_INODE && !IS_FAULT_SET(type))
return false;
atomic_inc(&f2fs_fault.inject_ops);
if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
atomic_set(&f2fs_fault.inject_ops, 0);
printk("%sF2FS-fs : inject %s in %pF\n",
KERN_INFO,
fault_name[type],
__builtin_return_address(0));
return true;
}
return false;
}
#endif #endif
/* /*
...@@ -158,7 +128,7 @@ enum { ...@@ -158,7 +128,7 @@ enum {
CP_DISCARD, CP_DISCARD,
}; };
#define DEF_BATCHED_TRIM_SECTIONS 32 #define DEF_BATCHED_TRIM_SECTIONS 2
#define BATCHED_TRIM_SEGMENTS(sbi) \ #define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec) (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \ #define BATCHED_TRIM_BLOCKS(sbi) \
...@@ -211,6 +181,13 @@ struct discard_entry { ...@@ -211,6 +181,13 @@ struct discard_entry {
int len; /* # of consecutive blocks of the discard */ int len; /* # of consecutive blocks of the discard */
}; };
struct bio_entry {
struct list_head list;
struct bio *bio;
struct completion event;
int error;
};
/* for the list of fsync inodes, used only during recovery */ /* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry { struct fsync_inode_entry {
struct list_head list; /* list head */ struct list_head list; /* list head */
...@@ -645,6 +622,7 @@ struct f2fs_sm_info { ...@@ -645,6 +622,7 @@ struct f2fs_sm_info {
/* for small discard management */ /* for small discard management */
struct list_head discard_list; /* 4KB discard list */ struct list_head discard_list; /* 4KB discard list */
struct list_head wait_list; /* linked with issued discard bio */
int nr_discards; /* # of discards in the list */ int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */ int max_discards; /* max. discards to be issued */
...@@ -748,6 +726,7 @@ enum { ...@@ -748,6 +726,7 @@ enum {
SBI_NEED_FSCK, /* need fsck.f2fs to fix */ SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */ SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */ SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
}; };
enum { enum {
...@@ -765,7 +744,7 @@ struct f2fs_sb_info { ...@@ -765,7 +744,7 @@ struct f2fs_sb_info {
struct proc_dir_entry *s_proc; /* proc entry */ struct proc_dir_entry *s_proc; /* proc entry */
struct f2fs_super_block *raw_super; /* raw super block pointer */ struct f2fs_super_block *raw_super; /* raw super block pointer */
int valid_super_block; /* valid super block no */ int valid_super_block; /* valid super block no */
int s_flag; /* flags for sbi */ unsigned long s_flag; /* flags for sbi */
#ifdef CONFIG_F2FS_FS_ENCRYPTION #ifdef CONFIG_F2FS_FS_ENCRYPTION
u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE]; u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
...@@ -785,6 +764,7 @@ struct f2fs_sb_info { ...@@ -785,6 +764,7 @@ struct f2fs_sb_info {
/* for checkpoint */ /* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */ struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */ struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */ struct rw_semaphore cp_rwsem; /* blocking FS operations */
...@@ -892,8 +872,37 @@ struct f2fs_sb_info { ...@@ -892,8 +872,37 @@ struct f2fs_sb_info {
/* Reference to checksum algorithm driver via cryptoapi */ /* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver; struct crypto_shash *s_chksum_driver;
/* For fault injection */
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info fault_info;
#endif
}; };
#ifdef CONFIG_F2FS_FAULT_INJECTION
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
struct f2fs_fault_info *ffi = &sbi->fault_info;
if (!ffi->inject_rate)
return false;
if (!IS_FAULT_SET(ffi, type))
return false;
atomic_inc(&ffi->inject_ops);
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
atomic_set(&ffi->inject_ops, 0);
printk("%sF2FS-fs : inject %s in %pF\n",
KERN_INFO,
fault_name[type],
__builtin_return_address(0));
return true;
}
return false;
}
#endif
/* For write statistics. Suppose sector size is 512 bytes, /* For write statistics. Suppose sector size is 512 bytes,
* and the return value is in kbytes. s is of struct f2fs_sb_info. * and the return value is in kbytes. s is of struct f2fs_sb_info.
*/ */
...@@ -1034,17 +1043,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) ...@@ -1034,17 +1043,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{ {
return sbi->s_flag & (0x01 << type); return test_bit(type, &sbi->s_flag);
} }
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{ {
sbi->s_flag |= (0x01 << type); set_bit(type, &sbi->s_flag);
} }
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{ {
sbi->s_flag &= ~(0x01 << type); clear_bit(type, &sbi->s_flag);
} }
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
...@@ -1052,26 +1061,57 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) ...@@ -1052,26 +1061,57 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver); return le64_to_cpu(cp->checkpoint_ver);
} }
static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{ {
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
return ckpt_flags & f; return ckpt_flags & f;
} }
static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{ {
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
}
static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags;
ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f; ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags); cp->ckpt_flags = cpu_to_le32(ckpt_flags);
} }
static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{ {
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); spin_lock(&sbi->cp_lock);
__set_ckpt_flags(F2FS_CKPT(sbi), f);
spin_unlock(&sbi->cp_lock);
}
static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags;
ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f); ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags); cp->ckpt_flags = cpu_to_le32(ckpt_flags);
} }
static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
spin_lock(&sbi->cp_lock);
__clear_ckpt_flags(F2FS_CKPT(sbi), f);
spin_unlock(&sbi->cp_lock);
}
static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
return blk_queue_discard(q);
}
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{ {
down_read(&sbi->cp_rwsem); down_read(&sbi->cp_rwsem);
...@@ -1110,8 +1150,8 @@ static inline bool __remain_node_summaries(int reason) ...@@ -1110,8 +1150,8 @@ static inline bool __remain_node_summaries(int reason)
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{ {
return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) || return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG)); is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
} }
/* /*
...@@ -1151,7 +1191,7 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, ...@@ -1151,7 +1191,7 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
blkcnt_t diff; blkcnt_t diff;
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_BLOCK)) if (time_to_inject(sbi, FAULT_BLOCK))
return false; return false;
#endif #endif
/* /*
...@@ -1193,6 +1233,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, ...@@ -1193,6 +1233,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{ {
percpu_counter_inc(&sbi->nr_pages[count_type]); percpu_counter_inc(&sbi->nr_pages[count_type]);
if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES)
return;
set_sbi_flag(sbi, SBI_IS_DIRTY); set_sbi_flag(sbi, SBI_IS_DIRTY);
} }
...@@ -1243,6 +1287,11 @@ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) ...@@ -1243,6 +1287,11 @@ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
return sbi->total_valid_block_count; return sbi->total_valid_block_count;
} }
static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
{
return sbi->discard_blks;
}
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{ {
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
...@@ -1376,7 +1425,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, ...@@ -1376,7 +1425,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
if (page) if (page)
return page; return page;
if (time_to_inject(FAULT_PAGE_ALLOC)) if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
return NULL; return NULL;
#endif #endif
if (!for_write) if (!for_write)
...@@ -1804,7 +1853,7 @@ static inline int f2fs_readonly(struct super_block *sb) ...@@ -1804,7 +1853,7 @@ static inline int f2fs_readonly(struct super_block *sb)
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{ {
return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
} }
static inline bool is_dot_dotdot(const struct qstr *str) static inline bool is_dot_dotdot(const struct qstr *str)
...@@ -1827,10 +1876,11 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) ...@@ -1827,10 +1876,11 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
return S_ISREG(inode->i_mode); return S_ISREG(inode->i_mode);
} }
static inline void *f2fs_kmalloc(size_t size, gfp_t flags) static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{ {
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_KMALLOC)) if (time_to_inject(sbi, FAULT_KMALLOC))
return NULL; return NULL;
#endif #endif
return kmalloc(size, flags); return kmalloc(size, flags);
...@@ -1885,6 +1935,7 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long); ...@@ -1885,6 +1935,7 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
*/ */
void f2fs_set_inode_flags(struct inode *); void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long); struct inode *f2fs_iget(struct super_block *, unsigned long);
struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int); int try_to_free_nats(struct f2fs_sb_info *, int);
int update_inode(struct inode *, struct page *); int update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *); int update_inode_page(struct inode *);
...@@ -1900,7 +1951,6 @@ struct dentry *f2fs_get_parent(struct dentry *child); ...@@ -1900,7 +1951,6 @@ struct dentry *f2fs_get_parent(struct dentry *child);
/* /*
* dir.c * dir.c
*/ */
extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t); void set_de_type(struct f2fs_dir_entry *, umode_t);
unsigned char get_de_type(struct f2fs_dir_entry *); unsigned char get_de_type(struct f2fs_dir_entry *);
struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *, struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
...@@ -1910,10 +1960,12 @@ bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *, ...@@ -1910,10 +1960,12 @@ bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
void do_make_empty_dir(struct inode *, struct inode *, void do_make_empty_dir(struct inode *, struct inode *,
struct f2fs_dentry_ptr *); struct f2fs_dentry_ptr *);
struct page *init_inode_metadata(struct inode *, struct inode *, struct page *init_inode_metadata(struct inode *, struct inode *,
const struct qstr *, struct page *); const struct qstr *, const struct qstr *, struct page *);
void update_parent_metadata(struct inode *, struct inode *, unsigned int); void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int); int room_for_filename(const void *, int, int);
void f2fs_drop_nlink(struct inode *, struct inode *); void f2fs_drop_nlink(struct inode *, struct inode *);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
struct page **);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *, struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **); struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
...@@ -1924,7 +1976,9 @@ int update_dent_inode(struct inode *, struct inode *, const struct qstr *); ...@@ -1924,7 +1976,9 @@ int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *, void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int); const struct qstr *, f2fs_hash_t , unsigned int);
int f2fs_add_regular_entry(struct inode *, const struct qstr *, int f2fs_add_regular_entry(struct inode *, const struct qstr *,
struct inode *, nid_t, umode_t); const struct qstr *, struct inode *, nid_t, umode_t);
int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t, int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t); umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *, void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
...@@ -2010,9 +2064,9 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *); ...@@ -2010,9 +2064,9 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t); void invalidate_blocks(struct f2fs_sb_info *, block_t);
bool is_checkpointed_data(struct f2fs_sb_info *, block_t); bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t); void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *); void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *); void release_discard_addrs(struct f2fs_sb_info *);
bool discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool); int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *); void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *); int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
...@@ -2095,6 +2149,10 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64); ...@@ -2095,6 +2149,10 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
void f2fs_set_page_dirty_nobuffers(struct page *); void f2fs_set_page_dirty_nobuffers(struct page *);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int); void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t); int f2fs_release_page(struct page *, gfp_t);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
enum migrate_mode);
#endif
/* /*
* gc.c * gc.c
...@@ -2123,13 +2181,14 @@ struct f2fs_stat_info { ...@@ -2123,13 +2181,14 @@ struct f2fs_stat_info {
unsigned long long hit_largest, hit_cached, hit_rbtree; unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext; unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node; int ext_tree, zombie_tree, ext_node;
s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, inmem_pages; s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
s64 inmem_pages;
unsigned int ndirty_dirs, ndirty_files, ndirty_all; unsigned int ndirty_dirs, ndirty_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits, fnids; int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization; int total_count, utilization;
int bg_gc, wb_bios; int bg_gc, wb_bios;
int inline_xattr, inline_inode, inline_dir, orphans; int inline_xattr, inline_inode, inline_dir, orphans;
unsigned int valid_count, valid_node_count, valid_inode_count; unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks; unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid; int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs; int rsvd_segs, overp_segs;
...@@ -2294,8 +2353,8 @@ bool recover_inline_data(struct inode *, struct page *); ...@@ -2294,8 +2353,8 @@ bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
struct fscrypt_name *, struct page **); struct fscrypt_name *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *); int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *, int f2fs_add_inline_entry(struct inode *, const struct qstr *,
nid_t, umode_t); const struct qstr *, struct inode *, nid_t, umode_t);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *, void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
struct inode *, struct inode *); struct inode *, struct inode *);
bool f2fs_empty_inline_dir(struct inode *); bool f2fs_empty_inline_dir(struct inode *);
......
...@@ -135,7 +135,7 @@ static inline bool need_do_checkpoint(struct inode *inode) ...@@ -135,7 +135,7 @@ static inline bool need_do_checkpoint(struct inode *inode)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true; need_cp = true;
else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino)) else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
need_cp = true; need_cp = true;
else if (file_wrong_pino(inode)) else if (file_wrong_pino(inode))
need_cp = true; need_cp = true;
...@@ -523,7 +523,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, ...@@ -523,7 +523,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0; return 0;
if (cache_only) { if (cache_only) {
page = f2fs_grab_cache_page(mapping, index, false); page = find_lock_page(mapping, index);
if (page && PageUptodate(page)) if (page && PageUptodate(page))
goto truncate_out; goto truncate_out;
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
...@@ -1454,7 +1454,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) ...@@ -1454,7 +1454,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{ {
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE; unsigned int flags;
unsigned int oldflags; unsigned int oldflags;
int ret; int ret;
...@@ -1954,7 +1954,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, ...@@ -1954,7 +1954,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
* avoid defragment running in SSR mode when free section are allocated * avoid defragment running in SSR mode when free section are allocated
* intensively * intensively
*/ */
if (has_not_enough_free_secs(sbi, sec_num)) { if (has_not_enough_free_secs(sbi, 0, sec_num)) {
err = -EAGAIN; err = -EAGAIN;
goto out; goto out;
} }
...@@ -2085,6 +2085,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, ...@@ -2085,6 +2085,13 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst)) if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (src == dst) {
if (pos_in == pos_out)
return 0;
if (pos_out > pos_in && pos_out < pos_in + len)
return -EINVAL;
}
inode_lock(src); inode_lock(src);
if (src != dst) { if (src != dst) {
if (!inode_trylock(dst)) { if (!inode_trylock(dst)) {
...@@ -2136,8 +2143,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, ...@@ -2136,8 +2143,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
ret = __exchange_data_block(src, dst, pos_in, ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
pos_out, len >> F2FS_BLKSIZE_BITS, false); pos_out >> F2FS_BLKSIZE_BITS,
len >> F2FS_BLKSIZE_BITS, false);
if (!ret) { if (!ret) {
if (dst_max_i_size) if (dst_max_i_size)
......
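
The new same-inode guard in f2fs_move_file_range() above returns 0 for a no-op move and -EINVAL when the destination window begins inside the source window, since those blocks would be overwritten before they are copied. A tiny stand-alone check of that predicate (ranges_clash is an invented name, not an f2fs helper):

/* Quick user-space check of the same-file overlap rule used above. */
#include <stdbool.h>
#include <stdio.h>

static bool ranges_clash(long long pos_in, long long pos_out, long long len)
{
        if (pos_in == pos_out)
                return false;   /* identical ranges: nothing to move at all */
        return pos_out > pos_in && pos_out < pos_in + len;
}

int main(void)
{
        /* dst starts inside src: rejected (-EINVAL in the kernel) */
        printf("%d\n", ranges_clash(0, 4096, 8192));    /* 1 */
        /* dst starts at or after the end of src: allowed */
        printf("%d\n", ranges_clash(0, 8192, 8192));    /* 0 */
        /* dst entirely before src: allowed by this particular check */
        printf("%d\n", ranges_clash(8192, 0, 8192));    /* 0 */
        return 0;
}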
...@@ -47,6 +47,11 @@ static int gc_thread_func(void *data) ...@@ -47,6 +47,11 @@ static int gc_thread_func(void *data)
continue; continue;
} }
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT))
f2fs_stop_checkpoint(sbi, false);
#endif
/* /*
* [GC triggering condition] * [GC triggering condition]
* 0. GC is not conducted currently. * 0. GC is not conducted currently.
...@@ -96,7 +101,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi) ...@@ -96,7 +101,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev; dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0; int err = 0;
gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) { if (!gc_th) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
...@@ -270,7 +275,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, ...@@ -270,7 +275,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{ {
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p; struct victim_sel_policy p;
unsigned int secno, max_cost, last_victim; unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi); unsigned int last_segment = MAIN_SEGS(sbi);
unsigned int nsearched = 0; unsigned int nsearched = 0;
...@@ -280,7 +285,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, ...@@ -280,7 +285,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
select_policy(sbi, gc_type, type, &p); select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO; p.min_segno = NULL_SEGNO;
p.min_cost = max_cost = get_max_cost(sbi, &p); p.min_cost = get_max_cost(sbi, &p);
if (p.max_search == 0) if (p.max_search == 0)
goto out; goto out;
...@@ -423,10 +428,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi, ...@@ -423,10 +428,10 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
static void gc_node_segment(struct f2fs_sb_info *sbi, static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type) struct f2fs_summary *sum, unsigned int segno, int gc_type)
{ {
bool initial = true;
struct f2fs_summary *entry; struct f2fs_summary *entry;
block_t start_addr; block_t start_addr;
int off; int off;
int phase = 0;
start_addr = START_BLOCK(sbi, segno); start_addr = START_BLOCK(sbi, segno);
...@@ -439,16 +444,24 @@ static void gc_node_segment(struct f2fs_sb_info *sbi, ...@@ -439,16 +444,24 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
struct node_info ni; struct node_info ni;
/* stop BG_GC if there is not enough free sections. */ /* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
return; return;
if (check_valid_map(sbi, segno, off) == 0) if (check_valid_map(sbi, segno, off) == 0)
continue; continue;
-		if (initial) {
+		if (phase == 0) {
+			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+							META_NAT, true);
+			continue;
+		}
+
+		if (phase == 1) {
 			ra_node_page(sbi, nid);
 			continue;
 		}
+
+		/* phase == 2 */
node_page = get_node_page(sbi, nid); node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page)) if (IS_ERR(node_page))
continue; continue;
...@@ -469,10 +482,8 @@ static void gc_node_segment(struct f2fs_sb_info *sbi, ...@@ -469,10 +482,8 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
stat_inc_node_blk_count(sbi, 1, gc_type); stat_inc_node_blk_count(sbi, 1, gc_type);
} }
-	if (initial) {
-		initial = false;
+	if (++phase < 3)
 		goto next_step;
-	}
} }
/* /*
...@@ -706,16 +717,23 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, ...@@ -706,16 +717,23 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct node_info dni; /* dnode info for the data */ struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs; unsigned int ofs_in_node, nofs;
block_t start_bidx; block_t start_bidx;
nid_t nid = le32_to_cpu(entry->nid);
/* stop BG_GC if there is not enough free sections. */ /* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
return; return;
if (check_valid_map(sbi, segno, off) == 0) if (check_valid_map(sbi, segno, off) == 0)
continue; continue;
 		if (phase == 0) {
-			ra_node_page(sbi, le32_to_cpu(entry->nid));
+			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+							META_NAT, true);
+			continue;
+		}
+
+		if (phase == 1) {
+			ra_node_page(sbi, nid);
 			continue;
 		}
...@@ -723,14 +741,14 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, ...@@ -723,14 +741,14 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
continue; continue;
if (phase == 1) { if (phase == 2) {
ra_node_page(sbi, dni.ino); ra_node_page(sbi, dni.ino);
continue; continue;
} }
ofs_in_node = le16_to_cpu(entry->ofs_in_node); ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 2) { if (phase == 3) {
inode = f2fs_iget(sb, dni.ino); inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode)) if (IS_ERR(inode) || is_bad_inode(inode))
continue; continue;
...@@ -756,7 +774,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, ...@@ -756,7 +774,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
continue; continue;
} }
/* phase 3 */ /* phase 4 */
inode = find_gc_inode(gc_list, dni.ino); inode = find_gc_inode(gc_list, dni.ino);
if (inode) { if (inode) {
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
...@@ -789,7 +807,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, ...@@ -789,7 +807,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
} }
} }
if (++phase < 4) if (++phase < 5)
goto next_step; goto next_step;
} }
...@@ -815,7 +833,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -815,7 +833,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
struct blk_plug plug; struct blk_plug plug;
unsigned int segno = start_segno; unsigned int segno = start_segno;
unsigned int end_segno = start_segno + sbi->segs_per_sec; unsigned int end_segno = start_segno + sbi->segs_per_sec;
int seg_freed = 0; int sec_freed = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE; SUM_TYPE_DATA : SUM_TYPE_NODE;
...@@ -834,8 +852,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -834,8 +852,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
for (segno = start_segno; segno < end_segno; segno++) { for (segno = start_segno; segno < end_segno; segno++) {
if (get_valid_blocks(sbi, segno, 1) == 0) if (get_valid_blocks(sbi, segno, 1) == 0 ||
continue; unlikely(f2fs_cp_error(sbi)))
goto next;
/* find segment summary of victim */ /* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi), sum_page = find_get_page(META_MAPPING(sbi),
...@@ -861,7 +880,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -861,7 +880,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
gc_type); gc_type);
stat_inc_seg_count(sbi, type, gc_type); stat_inc_seg_count(sbi, type, gc_type);
next:
f2fs_put_page(sum_page, 0); f2fs_put_page(sum_page, 0);
} }
...@@ -871,22 +890,20 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -871,22 +890,20 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
blk_finish_plug(&plug); blk_finish_plug(&plug);
-	if (gc_type == FG_GC) {
-		while (start_segno < end_segno)
-			if (get_valid_blocks(sbi, start_segno++, 1) == 0)
-				seg_freed++;
-	}
+	if (gc_type == FG_GC &&
+		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+		sec_freed = 1;
 
 	stat_inc_call_count(sbi->stat_info);
 
-	return seg_freed;
+	return sec_freed;
 }
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{ {
unsigned int segno; unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC; int gc_type = sync ? FG_GC : BG_GC;
int sec_freed = 0, seg_freed; int sec_freed = 0;
int ret = -EINVAL; int ret = -EINVAL;
struct cp_control cpc; struct cp_control cpc;
struct gc_inode_list gc_list = { struct gc_inode_list gc_list = {
...@@ -905,7 +922,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) ...@@ -905,7 +922,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
goto stop; goto stop;
} }
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) { if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
gc_type = FG_GC; gc_type = FG_GC;
/* /*
* If there is no victim and no prefree segment but still not * If there is no victim and no prefree segment but still not
...@@ -914,10 +931,14 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) ...@@ -914,10 +931,14 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
*/ */
if (__get_victim(sbi, &segno, gc_type) || if (__get_victim(sbi, &segno, gc_type) ||
prefree_segments(sbi)) { prefree_segments(sbi)) {
write_checkpoint(sbi, &cpc); ret = write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
segno = NULL_SEGNO; segno = NULL_SEGNO;
} else if (has_not_enough_free_secs(sbi, 0)) { } else if (has_not_enough_free_secs(sbi, 0, 0)) {
write_checkpoint(sbi, &cpc); ret = write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
} }
} }
...@@ -925,20 +946,19 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) ...@@ -925,20 +946,19 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
goto stop; goto stop;
ret = 0; ret = 0;
-	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
-
-	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
+	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
+			gc_type == FG_GC)
 		sec_freed++;
if (gc_type == FG_GC) if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO; sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) { if (!sync) {
if (has_not_enough_free_secs(sbi, sec_freed)) if (has_not_enough_free_secs(sbi, sec_freed, 0))
goto gc_more; goto gc_more;
if (gc_type == FG_GC) if (gc_type == FG_GC)
write_checkpoint(sbi, &cpc); ret = write_checkpoint(sbi, &cpc);
} }
stop: stop:
mutex_unlock(&sbi->gc_mutex); mutex_unlock(&sbi->gc_mutex);
......
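
The gc.c hunks above restructure victim cleaning into numbered phases: first issue readahead for the NAT blocks, then for the node pages (and, for data segments, the inode node pages), and only in the last phase actually migrate blocks, so the expensive pass mostly hits the page cache. A stripped-down sketch of that control flow in user-space C, with invented prefetch_meta/prefetch_node/migrate_entry placeholders:

/* Skeleton of the phased scan used by gc_node_segment()/gc_data_segment(). */
#include <stdio.h>

#define NR_ENTRIES 4

static void prefetch_meta(int i)  { printf("phase 0: readahead NAT for %d\n", i); }
static void prefetch_node(int i)  { printf("phase 1: readahead node %d\n", i); }
static void migrate_entry(int i)  { printf("phase 2: move blocks of %d\n", i); }

int main(void)
{
        int phase = 0;

next_step:
        for (int i = 0; i < NR_ENTRIES; i++) {
                if (phase == 0) {
                        prefetch_meta(i);       /* cheap, purely readahead in f2fs */
                        continue;
                }
                if (phase == 1) {
                        prefetch_node(i);
                        continue;
                }
                /* phase == 2: the pages touched here are now likely cached */
                migrate_entry(i);
        }
        if (++phase < 3)
                goto next_step;
        return 0;
}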
...@@ -424,7 +424,7 @@ static int f2fs_add_inline_entries(struct inode *dir, ...@@ -424,7 +424,7 @@ static int f2fs_add_inline_entries(struct inode *dir,
ino = le32_to_cpu(de->ino); ino = le32_to_cpu(de->ino);
fake_mode = get_de_type(de) << S_SHIFT; fake_mode = get_de_type(de) << S_SHIFT;
err = f2fs_add_regular_entry(dir, &new_name, NULL, err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
ino, fake_mode); ino, fake_mode);
if (err) if (err)
goto punch_dentry_pages; goto punch_dentry_pages;
...@@ -445,8 +445,8 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, ...@@ -445,8 +445,8 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *backup_dentry; struct f2fs_inline_dentry *backup_dentry;
int err; int err;
backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry), backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
GFP_F2FS_ZERO); sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
if (!backup_dentry) { if (!backup_dentry) {
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
return -ENOMEM; return -ENOMEM;
...@@ -488,17 +488,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, ...@@ -488,17 +488,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry); return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
} }
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
struct inode *inode, nid_t ino, umode_t mode) const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage; struct page *ipage;
unsigned int bit_pos; unsigned int bit_pos;
f2fs_hash_t name_hash; f2fs_hash_t name_hash;
size_t namelen = name->len;
struct f2fs_inline_dentry *dentry_blk = NULL; struct f2fs_inline_dentry *dentry_blk = NULL;
struct f2fs_dentry_ptr d; struct f2fs_dentry_ptr d;
int slots = GET_DENTRY_SLOTS(namelen); int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL; struct page *page = NULL;
int err = 0; int err = 0;
...@@ -519,18 +519,21 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, ...@@ -519,18 +519,21 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
if (inode) { if (inode) {
down_write(&F2FS_I(inode)->i_sem); down_write(&F2FS_I(inode)->i_sem);
page = init_inode_metadata(inode, dir, name, ipage); page = init_inode_metadata(inode, dir, new_name,
orig_name, ipage);
if (IS_ERR(page)) { if (IS_ERR(page)) {
err = PTR_ERR(page); err = PTR_ERR(page);
goto fail; goto fail;
} }
if (f2fs_encrypted_inode(dir))
file_set_enc_name(inode);
} }
f2fs_wait_on_page_writeback(ipage, NODE, true); f2fs_wait_on_page_writeback(ipage, NODE, true);
name_hash = f2fs_dentry_hash(name); name_hash = f2fs_dentry_hash(new_name);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2); make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos); f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage); set_page_dirty(ipage);
...@@ -563,7 +566,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, ...@@ -563,7 +566,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
inline_dentry = inline_data_addr(page); inline_dentry = inline_data_addr(page);
bit_pos = dentry - inline_dentry->dentry; bit_pos = dentry - inline_dentry->dentry;
for (i = 0; i < slots; i++) for (i = 0; i < slots; i++)
test_and_clear_bit_le(bit_pos + i, __clear_bit_le(bit_pos + i,
&inline_dentry->dentry_bitmap); &inline_dentry->dentry_bitmap);
set_page_dirty(page); set_page_dirty(page);
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/f2fs_fs.h> #include <linux/f2fs_fs.h>
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h> #include <linux/writeback.h>
#include "f2fs.h" #include "f2fs.h"
...@@ -234,6 +235,20 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) ...@@ -234,6 +235,20 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
struct inode *inode;
retry:
inode = f2fs_iget(sb, ino);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOMEM) {
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
}
return inode;
}
int update_inode(struct inode *inode, struct page *node_page) int update_inode(struct inode *inode, struct page *node_page)
{ {
struct f2fs_inode *ri; struct f2fs_inode *ri;
...@@ -354,7 +369,7 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -354,7 +369,7 @@ void f2fs_evict_inode(struct inode *inode)
goto no_delete; goto no_delete;
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_EVICT_INODE)) if (time_to_inject(sbi, FAULT_EVICT_INODE))
goto no_delete; goto no_delete;
#endif #endif
......
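
f2fs_iget_retry() above treats -ENOMEM from f2fs_iget() as transient: it backs off with congestion_wait(BLK_RW_ASYNC, HZ/50), roughly a 20 ms pause, and tries again, which is what keeps recovery and dentry lookup alive under memory-allocation fault injection. A rough user-space rendering of the same retry shape, with invented names (try_get_object, get_object_retry) standing in for the f2fs calls:

/* User-space sketch of the "retry on transient ENOMEM" pattern. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for f2fs_iget(): fails with ENOMEM the first two times. */
static void *try_get_object(void)
{
        static int calls;

        if (calls++ < 2) {
                errno = ENOMEM;
                return NULL;
        }
        return malloc(16);
}

/* Stand-in for f2fs_iget_retry(): back off ~20ms and try again on ENOMEM. */
static void *get_object_retry(void)
{
        struct timespec backoff = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };
        void *obj;

        for (;;) {
                obj = try_get_object();
                if (obj || errno != ENOMEM)
                        return obj;
                nanosleep(&backoff, NULL);  /* plays the role of congestion_wait() */
        }
}

int main(void)
{
        void *obj = get_object_retry();

        printf("got object after retries: %s\n", obj ? "yes" : "no");
        free(obj);
        return 0;
}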
...@@ -91,18 +91,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub) ...@@ -91,18 +91,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
{ {
size_t slen = strlen(s); size_t slen = strlen(s);
size_t sublen = strlen(sub); size_t sublen = strlen(sub);
int i;
/* /*
* filename format of multimedia file should be defined as: * filename format of multimedia file should be defined as:
* "filename + '.' + extension". * "filename + '.' + extension + (optional: '.' + temp extension)".
*/ */
if (slen < sublen + 2) if (slen < sublen + 2)
return 0; return 0;
-	if (s[slen - sublen - 1] != '.')
-		return 0;
+	for (i = 1; i < slen - sublen; i++) {
+		if (s[i] != '.')
+			continue;
+		if (!strncasecmp(s + i + 1, sub, sublen))
+			return 1;
+	}
 
-	return !strncasecmp(s + slen - sublen, sub, sublen);
+	return 0;
} }
/* /*
...@@ -1010,7 +1015,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, ...@@ -1010,7 +1015,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
struct fscrypt_str cstr = FSTR_INIT(NULL, 0); struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
struct fscrypt_str pstr = FSTR_INIT(NULL, 0); struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
struct fscrypt_symlink_data *sd; struct fscrypt_symlink_data *sd;
loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
u32 max_size = inode->i_sb->s_blocksize; u32 max_size = inode->i_sb->s_blocksize;
int res; int res;
...@@ -1025,7 +1029,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, ...@@ -1025,7 +1029,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
if (IS_ERR(cpage)) if (IS_ERR(cpage))
return ERR_CAST(cpage); return ERR_CAST(cpage);
caddr = page_address(cpage); caddr = page_address(cpage);
caddr[size] = 0;
/* Symlink is encrypted */ /* Symlink is encrypted */
sd = (struct fscrypt_symlink_data *)caddr; sd = (struct fscrypt_symlink_data *)caddr;
......
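
is_multimedia_file() above now scans every '.'-separated suffix rather than only the final one, so a name like "clip.mp4.part" is still treated as a multimedia (cold) file. The same matching logic, lifted into a small stand-alone program for experimentation (matches_ext is a local name, not the kernel function):

/* User-space re-statement of the new extension scan in is_multimedia_file(). */
#include <stdio.h>
#include <string.h>
#include <strings.h>    /* strncasecmp */

static int matches_ext(const char *s, const char *sub)
{
        size_t slen = strlen(s);
        size_t sublen = strlen(sub);
        size_t i;

        /* shortest possible hit is "x." + extension */
        if (slen < sublen + 2)
                return 0;

        for (i = 1; i < slen - sublen; i++) {
                if (s[i] != '.')
                        continue;
                if (!strncasecmp(s + i + 1, sub, sublen))
                        return 1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", matches_ext("clip.mp4", "mp4"));        /* 1 */
        printf("%d\n", matches_ext("clip.mp4.part", "mp4"));   /* 1 */
        printf("%d\n", matches_ext("notes.txt", "mp4"));       /* 0 */
        return 0;
}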
...@@ -54,8 +54,6 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) ...@@ -54,8 +54,6 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
if (excess_cached_nats(sbi)) if (excess_cached_nats(sbi))
res = false; res = false;
if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
res = false;
} else if (type == DIRTY_DENTS) { } else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded) if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false; return false;
...@@ -1314,6 +1312,7 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -1314,6 +1312,7 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *last_page = NULL; struct page *last_page = NULL;
bool marked = false; bool marked = false;
nid_t ino = inode->i_ino; nid_t ino = inode->i_ino;
int nwritten = 0;
if (atomic) { if (atomic) {
last_page = last_fsync_dnode(sbi, ino); last_page = last_fsync_dnode(sbi, ino);
...@@ -1387,7 +1386,10 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -1387,7 +1386,10 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
unlock_page(page); unlock_page(page);
f2fs_put_page(last_page, 0); f2fs_put_page(last_page, 0);
break; break;
} else {
nwritten++;
} }
if (page == last_page) { if (page == last_page) {
f2fs_put_page(page, 0); f2fs_put_page(page, 0);
marked = true; marked = true;
...@@ -1409,6 +1411,9 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -1409,6 +1411,9 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
unlock_page(last_page); unlock_page(last_page);
goto retry; goto retry;
} }
if (nwritten)
f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
return ret ? -EIO: 0; return ret ? -EIO: 0;
} }
...@@ -1418,6 +1423,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) ...@@ -1418,6 +1423,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
struct pagevec pvec; struct pagevec pvec;
int step = 0; int step = 0;
int nwritten = 0; int nwritten = 0;
int ret = 0;
pagevec_init(&pvec, 0); pagevec_init(&pvec, 0);
...@@ -1438,7 +1444,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) ...@@ -1438,7 +1444,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
if (unlikely(f2fs_cp_error(sbi))) { if (unlikely(f2fs_cp_error(sbi))) {
pagevec_release(&pvec); pagevec_release(&pvec);
return -EIO; ret = -EIO;
goto out;
} }
/* /*
...@@ -1489,6 +1496,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) ...@@ -1489,6 +1496,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
unlock_page(page); unlock_page(page);
else
nwritten++;
if (--wbc->nr_to_write == 0) if (--wbc->nr_to_write == 0)
break; break;
...@@ -1506,7 +1515,10 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) ...@@ -1506,7 +1515,10 @@ int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
step++; step++;
goto next_step; goto next_step;
} }
return nwritten; out:
if (nwritten)
f2fs_submit_merged_bio(sbi, NODE, WRITE);
return ret;
} }
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
...@@ -1672,6 +1684,9 @@ const struct address_space_operations f2fs_node_aops = { ...@@ -1672,6 +1684,9 @@ const struct address_space_operations f2fs_node_aops = {
.set_page_dirty = f2fs_set_node_page_dirty, .set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page, .invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page, .releasepage = f2fs_release_page,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif
}; };
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
...@@ -1838,7 +1853,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) ...@@ -1838,7 +1853,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct free_nid *i = NULL; struct free_nid *i = NULL;
retry: retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_ALLOC_NID)) if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false; return false;
#endif #endif
if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids)) if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
...@@ -2015,10 +2030,12 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) ...@@ -2015,10 +2030,12 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR)) if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL; return -EINVAL;
retry:
ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
if (!ipage) if (!ipage) {
return -ENOMEM; congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
/* Should not use this inode from free nid list */ /* Should not use this inode from free nid list */
remove_free_nid(NM_I(sbi), ino); remove_free_nid(NM_I(sbi), ino);
......
...@@ -229,6 +229,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid) ...@@ -229,6 +229,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
f2fs_change_bit(block_off, nm_i->nat_bitmap); f2fs_change_bit(block_off, nm_i->nat_bitmap);
} }
static inline nid_t ino_of_node(struct page *node_page)
{
struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.ino);
}
static inline nid_t nid_of_node(struct page *node_page)
{
struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.nid);
}
static inline unsigned int ofs_of_node(struct page *node_page)
{
struct f2fs_node *rn = F2FS_NODE(node_page);
unsigned flag = le32_to_cpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
static inline __u64 cpver_of_node(struct page *node_page)
{
struct f2fs_node *rn = F2FS_NODE(node_page);
return le64_to_cpu(rn->footer.cp_ver);
}
static inline block_t next_blkaddr_of_node(struct page *node_page)
{
struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.next_blkaddr);
}
static inline void fill_node_footer(struct page *page, nid_t nid, static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset) nid_t ino, unsigned int ofs, bool reset)
{ {
@@ -259,40 +290,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
 	struct f2fs_node *rn = F2FS_NODE(page);
+	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
 
-	rn->footer.cp_ver = ckpt->checkpoint_ver;
+	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+		__u64 crc = le32_to_cpu(*((__le32 *)
+				((unsigned char *)ckpt + crc_offset)));
+		cp_ver |= (crc << 32);
+	}
+	rn->footer.cp_ver = cpu_to_le64(cp_ver);
 	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
 }
 
-static inline nid_t ino_of_node(struct page *node_page)
-{
-	struct f2fs_node *rn = F2FS_NODE(node_page);
-	return le32_to_cpu(rn->footer.ino);
-}
-
-static inline nid_t nid_of_node(struct page *node_page)
-{
-	struct f2fs_node *rn = F2FS_NODE(node_page);
-	return le32_to_cpu(rn->footer.nid);
-}
-
-static inline unsigned int ofs_of_node(struct page *node_page)
-{
-	struct f2fs_node *rn = F2FS_NODE(node_page);
-	unsigned flag = le32_to_cpu(rn->footer.flag);
-	return flag >> OFFSET_BIT_SHIFT;
-}
-
-static inline unsigned long long cpver_of_node(struct page *node_page)
-{
-	struct f2fs_node *rn = F2FS_NODE(node_page);
-	return le64_to_cpu(rn->footer.cp_ver);
-}
-
-static inline block_t next_blkaddr_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
 {
-	struct f2fs_node *rn = F2FS_NODE(node_page);
-	return le32_to_cpu(rn->footer.next_blkaddr);
+	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+	__u64 cp_ver = cur_cp_version(ckpt);
+
+	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+		__u64 crc = le32_to_cpu(*((__le32 *)
+				((unsigned char *)ckpt + crc_offset)));
+		cp_ver |= (crc << 32);
+	}
+	return cpu_to_le64(cp_ver) == cpver_of_node(page);
 }
/* /*
......
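
With CP_CRC_RECOVERY_FLAG set, the node.h hunk above folds the checkpoint CRC into the upper 32 bits of the version stored in each node footer, and is_recoverable_dnode() then compares that packed value against the current checkpoint. The packing is easier to see outside the diff; a minimal sketch, assuming invented pack_ver/matches helpers:

/* Demonstrates packing a 32-bit CRC into the top half of a 64-bit version. */
#include <inttypes.h>
#include <stdio.h>

static uint64_t pack_ver(uint64_t cp_ver, uint32_t crc)
{
        /* low 32 bits: checkpoint version, high 32 bits: checkpoint CRC */
        return cp_ver | ((uint64_t)crc << 32);
}

static int matches(uint64_t footer_ver, uint64_t cp_ver, uint32_t crc)
{
        return footer_ver == pack_ver(cp_ver, crc);
}

int main(void)
{
        uint64_t footer = pack_ver(7, 0xdeadbeef);      /* written at checkpoint 7 */

        printf("0x%" PRIx64 "\n", footer);              /* 0xdeadbeef00000007 */
        printf("%d\n", matches(footer, 7, 0xdeadbeef)); /* 1: recoverable     */
        printf("%d\n", matches(footer, 7, 0x12345678)); /* 0: stale node      */
        return 0;
}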
...@@ -68,15 +68,17 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, ...@@ -68,15 +68,17 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL; return NULL;
} }
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head, static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
struct inode *inode) struct list_head *head, nid_t ino)
{ {
struct inode *inode;
struct fsync_inode_entry *entry; struct fsync_inode_entry *entry;
entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); inode = f2fs_iget_retry(sbi->sb, ino);
if (!entry) if (IS_ERR(inode))
return NULL; return ERR_CAST(inode);
entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
entry->inode = inode; entry->inode = inode;
list_add_tail(&entry->list, head); list_add_tail(&entry->list, head);
...@@ -96,48 +98,41 @@ static int recover_dentry(struct inode *inode, struct page *ipage, ...@@ -96,48 +98,41 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
struct f2fs_inode *raw_inode = F2FS_INODE(ipage); struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino); nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de; struct f2fs_dir_entry *de;
struct qstr name; struct fscrypt_name fname;
struct page *page; struct page *page;
struct inode *dir, *einode; struct inode *dir, *einode;
struct fsync_inode_entry *entry; struct fsync_inode_entry *entry;
int err = 0; int err = 0;
char *name;
entry = get_fsync_inode(dir_list, pino); entry = get_fsync_inode(dir_list, pino);
if (!entry) { if (!entry) {
dir = f2fs_iget(inode->i_sb, pino); entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
if (IS_ERR(dir)) { if (IS_ERR(entry)) {
err = PTR_ERR(dir); dir = ERR_CAST(entry);
goto out; err = PTR_ERR(entry);
}
entry = add_fsync_inode(dir_list, dir);
if (!entry) {
err = -ENOMEM;
iput(dir);
goto out; goto out;
} }
} }
dir = entry->inode; dir = entry->inode;
if (file_enc_name(inode)) memset(&fname, 0, sizeof(struct fscrypt_name));
return 0; fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
fname.disk_name.name = raw_inode->i_name;
name.len = le32_to_cpu(raw_inode->i_namelen); if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
name.name = raw_inode->i_name;
if (unlikely(name.len > F2FS_NAME_LEN)) {
WARN_ON(1); WARN_ON(1);
err = -ENAMETOOLONG; err = -ENAMETOOLONG;
goto out; goto out;
} }
retry: retry:
de = f2fs_find_entry(dir, &name, &page); de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino)) if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put; goto out_unmap_put;
if (de) { if (de) {
einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) { if (IS_ERR(einode)) {
WARN_ON(1); WARN_ON(1);
err = PTR_ERR(einode); err = PTR_ERR(einode);
...@@ -156,18 +151,24 @@ static int recover_dentry(struct inode *inode, struct page *ipage, ...@@ -156,18 +151,24 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
} else if (IS_ERR(page)) { } else if (IS_ERR(page)) {
err = PTR_ERR(page); err = PTR_ERR(page);
} else { } else {
err = __f2fs_add_link(dir, &name, inode, err = __f2fs_do_add_link(dir, &fname, inode,
inode->i_ino, inode->i_mode); inode->i_ino, inode->i_mode);
} }
if (err == -ENOMEM)
goto retry;
goto out; goto out;
out_unmap_put: out_unmap_put:
f2fs_dentry_kunmap(dir, page); f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0); f2fs_put_page(page, 0);
out: out:
if (file_enc_name(inode))
name = "<encrypted>";
else
name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE, f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d", "%s: ino = %x, name = %s, dir = %lx, err = %d",
__func__, ino_of_node(ipage), raw_inode->i_name, __func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err); IS_ERR(dir) ? 0 : dir->i_ino, err);
return err; return err;
} }
...@@ -223,9 +224,7 @@ static bool is_same_inode(struct inode *inode, struct page *ipage) ...@@ -223,9 +224,7 @@ static bool is_same_inode(struct inode *inode, struct page *ipage)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{ {
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg; struct curseg_info *curseg;
struct inode *inode;
struct page *page = NULL; struct page *page = NULL;
block_t blkaddr; block_t blkaddr;
int err = 0; int err = 0;
...@@ -242,7 +241,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -242,7 +241,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
page = get_tmp_page(sbi, blkaddr); page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page)) if (!is_recoverable_dnode(page))
break; break;
if (!is_fsync_dnode(page)) if (!is_fsync_dnode(page))
...@@ -263,23 +262,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -263,23 +262,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
* CP | dnode(F) | inode(DF) * CP | dnode(F) | inode(DF)
* For this case, we should not give up now. * For this case, we should not give up now.
*/ */
inode = f2fs_iget(sbi->sb, ino_of_node(page)); entry = add_fsync_inode(sbi, head, ino_of_node(page));
if (IS_ERR(inode)) { if (IS_ERR(entry)) {
err = PTR_ERR(inode); err = PTR_ERR(entry);
if (err == -ENOENT) { if (err == -ENOENT) {
err = 0; err = 0;
goto next; goto next;
} }
break; break;
} }
/* add this fsync inode to the list */
entry = add_fsync_inode(head, inode);
if (!entry) {
err = -ENOMEM;
iput(inode);
break;
}
} }
entry->blkaddr = blkaddr; entry->blkaddr = blkaddr;
...@@ -363,7 +354,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, ...@@ -363,7 +354,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
if (ino != dn->inode->i_ino) { if (ino != dn->inode->i_ino) {
/* Deallocate previous index in the node page */ /* Deallocate previous index in the node page */
inode = f2fs_iget(sbi->sb, ino); inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) if (IS_ERR(inode))
return PTR_ERR(inode); return PTR_ERR(inode);
} else { } else {
...@@ -431,10 +422,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -431,10 +422,15 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
end = start + ADDRS_PER_PAGE(page, inode); end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0); set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
err = get_dnode_of_data(&dn, start, ALLOC_NODE); err = get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err) if (err) {
if (err == -ENOMEM) {
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_dn;
}
goto out; goto out;
}
f2fs_wait_on_page_writeback(dn.node_page, NODE, true); f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
...@@ -485,11 +481,16 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -485,11 +481,16 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (err) if (err)
goto err; goto err;
} }
retry_prev:
/* Check the previous node page having this index */ /* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn); err = check_index_in_prev_nodes(sbi, dest, &dn);
if (err) if (err) {
if (err == -ENOMEM) {
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_prev;
}
goto err; goto err;
}
/* write dummy data page */ /* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest, f2fs_replace_block(sbi, &dn, src, dest,
...@@ -514,7 +515,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -514,7 +515,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *dir_list) struct list_head *dir_list)
{ {
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg; struct curseg_info *curseg;
struct page *page = NULL; struct page *page = NULL;
int err = 0; int err = 0;
...@@ -534,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, ...@@ -534,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
page = get_tmp_page(sbi, blkaddr); page = get_tmp_page(sbi, blkaddr);
if (cp_ver != cpver_of_node(page)) { if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
break; break;
} }
...@@ -626,38 +626,20 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) ...@@ -626,38 +626,20 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
} }
clear_sbi_flag(sbi, SBI_POR_DOING); clear_sbi_flag(sbi, SBI_POR_DOING);
-	if (err) {
-		bool invalidate = false;
-
-		if (test_opt(sbi, LFS)) {
-			update_meta_page(sbi, NULL, blkaddr);
-			invalidate = true;
-		} else if (discard_next_dnode(sbi, blkaddr)) {
-			invalidate = true;
-		}
-
-		/* Flush all the NAT/SIT pages */
-		while (get_pages(sbi, F2FS_DIRTY_META))
-			sync_meta_pages(sbi, META, LONG_MAX);
+	if (err)
+		set_ckpt_flags(sbi, CP_ERROR_FLAG);
+	mutex_unlock(&sbi->cp_mutex);
 
-		/* invalidate temporary meta page */
-		if (invalidate)
-			invalidate_mapping_pages(META_MAPPING(sbi),
-							blkaddr, blkaddr);
+	/* let's drop all the directory inodes for clean checkpoint */
+	destroy_fsync_dnodes(&dir_list);
 
-		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
-		mutex_unlock(&sbi->cp_mutex);
-	} else if (need_writecp) {
+	if (!err && need_writecp) {
 		struct cp_control cpc = {
 			.reason = CP_RECOVERY,
 		};
-		mutex_unlock(&sbi->cp_mutex);
 		err = write_checkpoint(sbi, &cpc);
-	} else {
-		mutex_unlock(&sbi->cp_mutex);
 	}
-
-	destroy_fsync_dnodes(&dir_list);
kmem_cache_destroy(fsync_entry_slab); kmem_cache_destroy(fsync_entry_slab);
return ret ? ret: err; return ret ? ret: err;
} }
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define __reverse_ffz(x) __reverse_ffs(~(x)) #define __reverse_ffz(x) __reverse_ffs(~(x))
static struct kmem_cache *discard_entry_slab; static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *bio_entry_slab;
static struct kmem_cache *sit_entry_set_slab; static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab; static struct kmem_cache *inmem_entry_slab;
...@@ -344,6 +345,11 @@ int commit_inmem_pages(struct inode *inode) ...@@ -344,6 +345,11 @@ int commit_inmem_pages(struct inode *inode)
*/ */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{ {
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT))
f2fs_stop_checkpoint(sbi, false);
#endif
if (!need) if (!need)
return; return;
...@@ -355,7 +361,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) ...@@ -355,7 +361,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
* We should do GC or end up with checkpoint, if there are so many dirty * We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments. * dir/node pages without enough free segments.
*/ */
if (has_not_enough_free_secs(sbi, 0)) { if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex); mutex_lock(&sbi->gc_mutex);
f2fs_gc(sbi, false); f2fs_gc(sbi, false);
} }
...@@ -580,6 +586,74 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) ...@@ -580,6 +586,74 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock); mutex_unlock(&dirty_i->seglist_lock);
} }
static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
struct bio *bio)
{
struct list_head *wait_list = &(SM_I(sbi)->wait_list);
struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
INIT_LIST_HEAD(&be->list);
be->bio = bio;
init_completion(&be->event);
list_add_tail(&be->list, wait_list);
return be;
}
void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
{
struct list_head *wait_list = &(SM_I(sbi)->wait_list);
struct bio_entry *be, *tmp;
list_for_each_entry_safe(be, tmp, wait_list, list) {
struct bio *bio = be->bio;
int err;
wait_for_completion_io(&be->event);
err = be->error;
if (err == -EOPNOTSUPP)
err = 0;
if (err)
f2fs_msg(sbi->sb, KERN_INFO,
"Issue discard failed, ret: %d", err);
bio_put(bio);
list_del(&be->list);
kmem_cache_free(bio_entry_slab, be);
}
}
static void f2fs_submit_bio_wait_endio(struct bio *bio)
{
struct bio_entry *be = (struct bio_entry *)bio->bi_private;
be->error = bio->bi_error;
complete(&be->event);
}
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
struct block_device *bdev = sbi->sb->s_bdev;
struct bio *bio = NULL;
int err;
err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
&bio);
if (!err && bio) {
struct bio_entry *be = __add_bio_entry(sbi, bio);
bio->bi_private = be;
bio->bi_end_io = f2fs_submit_bio_wait_endio;
bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
}
return err;
}
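
The segment.c additions above switch discard from the synchronous blkdev_issue_discard() call to an asynchronous submit: each bio gets a bio_entry with a completion queued on SM_I(sbi)->wait_list, and f2fs_wait_all_discard_bio() reaps everything later in the checkpoint path. A rough pthread-based analogue of that issue-now, reap-later shape, using invented work_entry/issue_async/wait_all names:

/* User-space analogue of "queue async requests, reap them all later". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work_entry {
        pthread_t thread;
        int range;                      /* pretend discard range id */
        int error;
        struct work_entry *next;
};

static struct work_entry *wait_list;    /* plays the role of SM_I(sbi)->wait_list */

static void *do_discard(void *arg)
{
        struct work_entry *we = arg;

        usleep(1000);                   /* pretend the device is working */
        we->error = 0;
        return NULL;
}

/* Like __f2fs_issue_discard_async(): start the work and remember it. */
static void issue_async(int range)
{
        struct work_entry *we = calloc(1, sizeof(*we));

        we->range = range;
        we->next = wait_list;
        wait_list = we;
        pthread_create(&we->thread, NULL, do_discard, we);
}

/* Like f2fs_wait_all_discard_bio(): reap everything that was issued. */
static void wait_all(void)
{
        while (wait_list) {
                struct work_entry *we = wait_list;

                wait_list = we->next;
                pthread_join(we->thread, NULL);
                if (we->error)
                        fprintf(stderr, "discard %d failed: %d\n", we->range, we->error);
                free(we);
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                issue_async(i);         /* all four run in parallel */
        wait_all();                     /* single reap point, as at checkpoint */
        printf("all discards reaped\n");
        return 0;
}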
static int f2fs_issue_discard(struct f2fs_sb_info *sbi, static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
block_t blkstart, block_t blklen) block_t blkstart, block_t blklen)
{ {
...@@ -597,29 +671,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi, ...@@ -597,29 +671,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
sbi->discard_blks--; sbi->discard_blks--;
} }
trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
}
bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
int err = -EOPNOTSUPP;
if (test_opt(sbi, DISCARD)) {
struct seg_entry *se = get_seg_entry(sbi,
GET_SEGNO(sbi, blkaddr));
unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
if (f2fs_test_bit(offset, se->discard_map))
return false;
err = f2fs_issue_discard(sbi, blkaddr, 1);
}
if (err) {
update_meta_page(sbi, NULL, blkaddr);
return true;
}
return false;
} }
static void __add_discard_entry(struct f2fs_sb_info *sbi, static void __add_discard_entry(struct f2fs_sb_info *sbi,
...@@ -660,7 +712,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -660,7 +712,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
bool force = (cpc->reason == CP_DISCARD); bool force = (cpc->reason == CP_DISCARD);
int i; int i;
if (se->valid_blocks == max_blocks) if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
return; return;
if (!force) { if (!force) {
...@@ -719,11 +771,14 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -719,11 +771,14 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct list_head *head = &(SM_I(sbi)->discard_list); struct list_head *head = &(SM_I(sbi)->discard_list);
struct discard_entry *entry, *this; struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct blk_plug plug;
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1; unsigned int start = 0, end = -1;
unsigned int secno, start_segno; unsigned int secno, start_segno;
bool force = (cpc->reason == CP_DISCARD); bool force = (cpc->reason == CP_DISCARD);
blk_start_plug(&plug);
mutex_lock(&dirty_i->seglist_lock); mutex_lock(&dirty_i->seglist_lock);
while (1) { while (1) {
...@@ -772,6 +827,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -772,6 +827,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
SM_I(sbi)->nr_discards -= entry->len; SM_I(sbi)->nr_discards -= entry->len;
kmem_cache_free(discard_entry_slab, entry); kmem_cache_free(discard_entry_slab, entry);
} }
blk_finish_plug(&plug);
} }
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
@@ -818,12 +875,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
-		if (!f2fs_test_and_set_bit(offset, se->discard_map))
+		if (f2fs_discard_en(sbi) &&
+			!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
-		if (f2fs_test_and_clear_bit(offset, se->discard_map))
+		if (f2fs_discard_en(sbi) &&
+			f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
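update_sit_entry() now touches the per-segment discard bitmap only when discard is enabled for this mount. The bookkeeping itself is plain test-and-set/test-and-clear on a bitmap plus a counter of discardable blocks; here is a self-contained model of that idea (not the kernel code, and the bit helpers are simplified):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLOCKS_PER_SEG 512
#define MAP_BYTES (BLOCKS_PER_SEG / 8)

struct seg_state {
	unsigned char cur_valid_map[MAP_BYTES]; /* blocks holding live data */
	unsigned char discard_map[MAP_BYTES];   /* set bit = not a discard candidate */
};

static bool test_and_set(unsigned char *map, unsigned int off)
{
	bool old = map[off / 8] & (1u << (off % 8));

	map[off / 8] |= 1u << (off % 8);
	return old;
}

static bool test_and_clear(unsigned char *map, unsigned int off)
{
	bool old = map[off / 8] & (1u << (off % 8));

	map[off / 8] &= ~(1u << (off % 8));
	return old;
}

/* del > 0: a block became valid; del < 0: a block was invalidated.
 * The kernel additionally bug-checks inconsistent cur_valid_map state. */
static void update_entry(struct seg_state *se, unsigned int off, int del,
			 bool discard_enabled, long *discard_blks)
{
	if (del > 0) {
		test_and_set(se->cur_valid_map, off);
		/* A freshly written block stops being discardable. */
		if (discard_enabled && !test_and_set(se->discard_map, off))
			(*discard_blks)--;
	} else {
		test_and_clear(se->cur_valid_map, off);
		/* An invalidated block becomes discardable again. */
		if (discard_enabled && test_and_clear(se->discard_map, off))
			(*discard_blks)++;
	}
}

int main(void)
{
	struct seg_state se;
	long discard_blks = BLOCKS_PER_SEG;	/* everything free at first */

	memset(&se, 0, sizeof(se));
	update_entry(&se, 7, +1, true, &discard_blks);	/* write block 7 */
	update_entry(&se, 7, -1, true, &discard_blks);	/* delete block 7 */
	printf("discardable blocks: %ld\n", discard_blks);
	return 0;
}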
@@ -1202,7 +1261,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

-	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);
@@ -1277,6 +1336,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+		f2fs_msg(sbi->sb, KERN_WARNING,
+			"Found FS corruption, run fsck to fix.");
+		goto out;
+	}
+
	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
@@ -1301,6 +1366,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
+		if (err)
+			break;
+
+		schedule();
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
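f2fs_trim_fs() now stops on the first checkpoint error and yields the CPU between batches instead of grinding through the whole range unconditionally. A minimal sketch of that batched control flow (the batch size, the failing range, and checkpoint_range() are made up for illustration):

#include <stdio.h>

#define BATCH_SEGS 64

/* Stand-in for write_checkpoint(): pretend segment 300 onwards fails. */
static int checkpoint_range(unsigned int start, unsigned int end)
{
	(void)start;
	return end >= 300 ? -5 /* -EIO */ : 0;
}

int main(void)
{
	unsigned int start_segno = 0, end_segno = 511;
	long trimmed = 0;
	int err = 0;

	for (unsigned int s = start_segno; s <= end_segno; s += BATCH_SEGS) {
		unsigned int e = s + BATCH_SEGS - 1;

		if (e > end_segno)
			e = end_segno;
		err = checkpoint_range(s, e);
		if (err)
			break;		/* stop trimming on the first error */
		trimmed += e - s + 1;
		/* here the kernel calls schedule() to yield the CPU */
	}
	printf("trimmed %ld segments, err=%d\n", trimmed, err);
	return 0;
}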
@@ -1391,7 +1460,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff &&
-				!has_not_enough_free_secs(sbi, 0))
+				!has_not_enough_free_secs(sbi, 0, 0))
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1589,11 +1658,9 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
{
	struct page *cpage;

-	if (blkaddr == NEW_ADDR)
+	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return;

-	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
-
	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true);
@@ -1739,7 +1806,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
	int type = CURSEG_HOT_DATA;
	int err;

-	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
@@ -1836,7 +1903,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
-	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
@@ -2127,12 +2194,16 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
-		sit_i->sentries[start].discard_map
-			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
-				!sit_i->sentries[start].ckpt_valid_map ||
-				!sit_i->sentries[start].discard_map)
+				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
+
+		if (f2fs_discard_en(sbi)) {
+			sit_i->sentries[start].discard_map
+				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+			if (!sit_i->sentries[start].discard_map)
+				return -ENOMEM;
+		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -2239,6 +2310,8 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
+	struct seg_entry *se;
+	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
@@ -2251,41 +2324,58 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
-			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
-			struct f2fs_sit_entry sit;
			struct page *page;

-			down_read(&curseg->journal_rwsem);
-			for (i = 0; i < sits_in_cursum(journal); i++) {
-				if (le32_to_cpu(segno_in_journal(journal, i))
-								== start) {
-					sit = sit_in_journal(journal, i);
-					up_read(&curseg->journal_rwsem);
-					goto got_it;
-				}
-			}
-			up_read(&curseg->journal_rwsem);
+			se = &sit_i->sentries[start];

			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
-got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
-			memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
-			sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
-
-			if (sbi->segs_per_sec > 1) {
-				struct sec_entry *e = get_sec_entry(sbi, start);
-				e->valid_blocks += se->valid_blocks;
-			}
+			if (f2fs_discard_en(sbi)) {
+				memcpy(se->discard_map, se->cur_valid_map,
+							SIT_VBLOCK_MAP_SIZE);
+				sbi->discard_blks += sbi->blocks_per_seg -
+							se->valid_blocks;
+			}
+
+			if (sbi->segs_per_sec > 1)
+				get_sec_entry(sbi, start)->valid_blocks +=
+							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
+
+	down_read(&curseg->journal_rwsem);
+	for (i = 0; i < sits_in_cursum(journal); i++) {
+		unsigned int old_valid_blocks;
+
+		start = le32_to_cpu(segno_in_journal(journal, i));
+		se = &sit_i->sentries[start];
+		sit = sit_in_journal(journal, i);
+
+		old_valid_blocks = se->valid_blocks;
+
+		check_block_count(sbi, start, &sit);
+		seg_info_from_raw_sit(se, &sit);
+
+		if (f2fs_discard_en(sbi)) {
+			memcpy(se->discard_map, se->cur_valid_map,
+						SIT_VBLOCK_MAP_SIZE);
+			sbi->discard_blks += old_valid_blocks -
+						se->valid_blocks;
+		}
+
+		if (sbi->segs_per_sec > 1)
+			get_sec_entry(sbi, start)->valid_blocks +=
+				se->valid_blocks - old_valid_blocks;
+	}
+	up_read(&curseg->journal_rwsem);
}
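The rework above splits SIT loading into two passes: first every segment is initialized from the on-disk SIT blocks, then the entries cached in the curseg journal overwrite those segments, and aggregate counters are adjusted by the delta between old and new valid-block counts. A toy model of that overlay (array sizes and values are purely illustrative):

#include <stdio.h>

#define NR_SEGS 8

struct journal_entry {
	unsigned int segno;
	int valid_blocks;
};

int main(void)
{
	/* Pass 1: baseline valid-block counts read from the "SIT blocks". */
	int valid[NR_SEGS] = { 0, 12, 507, 3, 0, 99, 512, 1 };
	long total_valid = 0;

	for (int i = 0; i < NR_SEGS; i++)
		total_valid += valid[i];

	/* Pass 2: newer per-segment values cached in the journal win. */
	struct journal_entry journal[] = { { 1, 20 }, { 3, 0 } };

	for (unsigned int i = 0; i < sizeof(journal) / sizeof(journal[0]); i++) {
		unsigned int segno = journal[i].segno;
		int old_valid = valid[segno];

		valid[segno] = journal[i].valid_blocks;
		/* Adjust the aggregate by the delta, the same way the f2fs
		 * code fixes up sbi->discard_blks and the section counters. */
		total_valid += valid[segno] - old_valid;
	}

	printf("total valid blocks after overlay: %ld\n", total_valid);
	return 0;
}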
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -2427,6 +2517,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;

	INIT_LIST_HEAD(&sm_info->discard_list);
+	INIT_LIST_HEAD(&sm_info->wait_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;
@@ -2570,10 +2661,15 @@ int __init create_segment_manager_caches(void)
	if (!discard_entry_slab)
		goto fail;

+	bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
+			sizeof(struct bio_entry));
+	if (!bio_entry_slab)
+		goto destroy_discard_entry;
+
	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
-		goto destory_discard_entry;
+		goto destroy_bio_entry;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
@@ -2583,7 +2679,9 @@ int __init create_segment_manager_caches(void)

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
-destory_discard_entry:
+destroy_bio_entry:
+	kmem_cache_destroy(bio_entry_slab);
+destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
@@ -2592,6 +2690,7 @@ int __init create_segment_manager_caches(void)
void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
+	kmem_cache_destroy(bio_entry_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}
@@ -479,7 +479,8 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
						reserved_sections(sbi) + 1);
}

-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
@@ -489,8 +490,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

-	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
-						reserved_sections(sbi));
+	return (free_sections(sbi) + freed) <=
+		(node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
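has_not_enough_free_secs() gains a `needed` argument so callers can demand extra headroom beyond the reserve, instead of only crediting sections about to be freed. A minimal user-space sketch of the arithmetic (struct fields and numbers are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's per-filesystem counters. */
struct fs_state {
	int free_sections;      /* sections currently free */
	int dirty_node_secs;    /* sections needed to flush dirty node pages */
	int dirty_dent_secs;    /* sections needed to flush dirty dentry pages */
	int reserved_sections;  /* configured reserve */
};

/*
 * Mirror of the reworked check: "freed" credits sections the caller is
 * about to release, "needed" reserves sections the caller still requires.
 */
static bool has_not_enough_free_secs(const struct fs_state *fs,
				     int freed, int needed)
{
	return (fs->free_sections + freed) <=
		(fs->dirty_node_secs + 2 * fs->dirty_dent_secs +
		 fs->reserved_sections + needed);
}

int main(void)
{
	struct fs_state fs = { .free_sections = 11, .dirty_node_secs = 2,
			       .dirty_dent_secs = 1, .reserved_sections = 6 };

	/* Without extra demand there is still headroom... */
	printf("needed=0 -> short of space? %d\n",
	       has_not_enough_free_secs(&fs, 0, 0));
	/* ...but asking for one more section tips it over the limit. */
	printf("needed=1 -> short of space? %d\n",
	       has_not_enough_free_secs(&fs, 0, 1));
	return 0;
}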
@@ -587,8 +588,8 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
-	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
-			|| blk_addr >= MAX_BLKADDR(sbi));
+	BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+			|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
...
@@ -40,7 +40,6 @@ static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

#ifdef CONFIG_F2FS_FAULT_INJECTION
-struct f2fs_fault_info f2fs_fault;

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC] = "kmalloc",
@@ -50,16 +49,21 @@ char *fault_name[FAULT_MAX] = {
	[FAULT_BLOCK] = "no more block",
	[FAULT_DIR_DEPTH] = "too big dir depth",
	[FAULT_EVICT_INODE] = "evict_inode fail",
+	[FAULT_IO] = "IO error",
+	[FAULT_CHECKPOINT] = "checkpoint error",
};

-static void f2fs_build_fault_attr(unsigned int rate)
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+						unsigned int rate)
{
+	struct f2fs_fault_info *ffi = &sbi->fault_info;
+
	if (rate) {
-		atomic_set(&f2fs_fault.inject_ops, 0);
-		f2fs_fault.inject_rate = rate;
-		f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
+		atomic_set(&ffi->inject_ops, 0);
+		ffi->inject_rate = rate;
+		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
-		memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info));
+		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
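Fault-injection state moves from the single global f2fs_fault into each superblock's sbi->fault_info, so two mounts can run with different rates. The sketch below models rate-based injection with a per-instance counter in plain C; the struct layout and helper names are illustrative, and the kernel uses an atomic counter where this uses a plain int:

#include <stdbool.h>
#include <stdio.h>

enum fault_type { FAULT_KMALLOC, FAULT_PAGE_ALLOC, FAULT_IO, FAULT_MAX };

struct fault_info {
	unsigned int inject_rate;  /* inject once every inject_rate calls */
	unsigned int inject_type;  /* bitmask of enabled fault types */
	unsigned int inject_ops;   /* calls seen since the last injection */
};

/* Mirrors the shape of f2fs_build_fault_attr(): rate 0 disables everything. */
static void build_fault_attr(struct fault_info *ffi, unsigned int rate)
{
	if (rate) {
		ffi->inject_ops = 0;
		ffi->inject_rate = rate;
		ffi->inject_type = (1u << FAULT_MAX) - 1;
	} else {
		*ffi = (struct fault_info){ 0 };
	}
}

static bool time_to_inject(struct fault_info *ffi, enum fault_type type)
{
	if (!ffi->inject_rate || !(ffi->inject_type & (1u << type)))
		return false;
	if (++ffi->inject_ops >= ffi->inject_rate) {
		ffi->inject_ops = 0;
		return true;	/* caller should fail this operation */
	}
	return false;
}

int main(void)
{
	struct fault_info a, b;		/* one instance per "superblock" */

	build_fault_attr(&a, 3);	/* fail every 3rd allocation */
	build_fault_attr(&b, 0);	/* no injection on this mount */

	for (int i = 1; i <= 6; i++)
		printf("call %d: a=%d b=%d\n", i,
		       time_to_inject(&a, FAULT_KMALLOC),
		       time_to_inject(&b, FAULT_KMALLOC));
	return 0;
}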
@@ -87,6 +91,7 @@ enum {
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
+	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
@@ -118,6 +123,7 @@ static match_table_t f2fs_tokens = {
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
+	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
@@ -167,7 +173,7 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
#ifdef CONFIG_F2FS_FAULT_INJECTION
	else if (struct_type == FAULT_INFO_RATE ||
					struct_type == FAULT_INFO_TYPE)
-		return (unsigned char *)&f2fs_fault;
+		return (unsigned char *)&sbi->fault_info;
#endif
	return NULL;
}
@@ -312,6 +318,10 @@ static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+	ATTR_LIST(inject_rate),
+	ATTR_LIST(inject_type),
+#endif
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
@@ -327,22 +337,6 @@ static struct kobj_type f2fs_ktype = {
	.release = f2fs_sb_release,
};

-#ifdef CONFIG_F2FS_FAULT_INJECTION
-/* sysfs for f2fs fault injection */
-static struct kobject f2fs_fault_inject;
-
-static struct attribute *f2fs_fault_attrs[] = {
-	ATTR_LIST(inject_rate),
-	ATTR_LIST(inject_type),
-	NULL
-};
-
-static struct kobj_type f2fs_fault_ktype = {
-	.default_attrs = f2fs_fault_attrs,
-	.sysfs_ops = &f2fs_attr_ops,
-};
-#endif
-
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
@@ -370,10 +364,6 @@ static int parse_options(struct super_block *sb, char *options)
	char *p, *name;
	int arg = 0;

-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	f2fs_build_fault_attr(0);
-#endif
-
	if (!options)
		return 0;
@@ -488,6 +478,9 @@ static int parse_options(struct super_block *sb, char *options)
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
+		case Opt_noinline_dentry:
+			clear_opt(sbi, INLINE_DENTRY);
+			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
@@ -533,7 +526,7 @@ static int parse_options(struct super_block *sb, char *options)
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
-			f2fs_build_fault_attr(arg);
+			f2fs_build_fault_attr(sbi, arg);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
@@ -730,7 +723,7 @@ static void f2fs_put_super(struct super_block *sb)
	 * clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
-			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
+			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
@@ -878,6 +871,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
+	else
+		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
@@ -946,7 +941,7 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, 1));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
-			seq_printf(seq, "%x ", se->cur_valid_map[j]);
+			seq_printf(seq, " %.2x", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
@@ -975,6 +970,7 @@ static void default_options(struct f2fs_sb_info *sbi)
	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
+	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
@@ -991,6 +987,10 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+	f2fs_build_fault_attr(sbi, 0);
+#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@@ -1001,6 +1001,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+	struct f2fs_fault_info ffi = sbi->fault_info;
+#endif

	/*
	 * Save the old mount options in case we
@@ -1096,6 +1099,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+	sbi->fault_info = ffi;
+#endif
	return err;
}
@@ -1469,6 +1475,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
	mutex_init(&sbi->umount_mutex);
	mutex_init(&sbi->wio_mutex[NODE]);
	mutex_init(&sbi->wio_mutex[DATA]);
+	spin_lock_init(&sbi->cp_lock);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
@@ -1810,7 +1817,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
	 * previous checkpoint was not done by clean system shutdown.
	 */
	if (bdev_read_only(sb->s_bdev) &&
-			!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		err = -EROFS;
		goto free_kobj;
	}
@@ -1818,6 +1825,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

+		if (!retry)
+			goto skip_recovery;
+
		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
@@ -1835,7 +1845,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
			goto free_kobj;
		}
	}
+skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -1879,7 +1889,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
+	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
+	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
@@ -1978,16 +1990,6 @@ static int __init init_f2fs_fs(void)
		err = -ENOMEM;
		goto free_extent_cache;
	}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	f2fs_fault_inject.kset = f2fs_kset;
-	f2fs_build_fault_attr(0);
-	err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype,
-				NULL, "fault_injection");
-	if (err) {
-		f2fs_fault_inject.kset = NULL;
-		goto free_kset;
-	}
-#endif
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;
@@ -2006,10 +2008,6 @@ static int __init init_f2fs_fs(void)
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	if (f2fs_fault_inject.kset)
-		kobject_put(&f2fs_fault_inject);
-#endif
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
@@ -2031,9 +2029,6 @@ static void __exit exit_f2fs_fs(void)
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-	kobject_put(&f2fs_fault_inject);
-#endif
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
...
@@ -217,18 +217,20 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
	return entry;
}

-static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+				void **base_addr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_xattr_header *header;
	size_t size = PAGE_SIZE, inline_size = 0;
	void *txattr_addr;
+	int err;

	inline_size = inline_xattr_size(inode);

	txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
	if (!txattr_addr)
-		return NULL;
+		return -ENOMEM;

	/* read from inline xattr */
	if (inline_size) {
@@ -239,8 +241,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
			inline_addr = inline_xattr_addr(ipage);
		} else {
			page = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(page))
+			if (IS_ERR(page)) {
+				err = PTR_ERR(page);
				goto fail;
+			}
			inline_addr = inline_xattr_addr(page);
		}
		memcpy(txattr_addr, inline_addr, inline_size);
@@ -254,8 +258,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
		/* The inode already has an extended attribute block. */
		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
-		if (IS_ERR(xpage))
+		if (IS_ERR(xpage)) {
+			err = PTR_ERR(xpage);
			goto fail;
+		}
		xattr_addr = page_address(xpage);
		memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
@@ -269,10 +275,11 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	}
-	return txattr_addr;
+	*base_addr = txattr_addr;
+	return 0;
fail:
	kzfree(txattr_addr);
-	return NULL;
+	return err;
}

static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
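read_all_xattrs() now returns 0 or a negative errno and hands the buffer back through an out-parameter, so callers can propagate the real error (for example from get_node_page()) instead of collapsing every failure into -ENOMEM. The same refactor in a self-contained form, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Before: the caller only learns "it failed", never why. */
static char *load_blob_old(const char *src)
{
	char *buf = malloc(strlen(src) + 1);

	if (!buf)
		return NULL;
	strcpy(buf, src);
	return buf;
}

/* After: the return value is 0 or a negative errno, the result is an
 * out-parameter, and every failure path reports its own error code. */
static int load_blob(const char *src, char **out)
{
	char *buf;

	if (!src)
		return -EINVAL;	/* distinct from an allocation failure */
	buf = malloc(strlen(src) + 1);
	if (!buf)
		return -ENOMEM;
	strcpy(buf, src);
	*out = buf;
	return 0;
}

int main(void)
{
	char *blob = NULL;
	int err = load_blob("user.comment=hello", &blob);

	if (err) {
		fprintf(stderr, "load_blob failed: %d\n", err);
		return 1;
	}
	printf("loaded: %s\n", blob);
	free(blob);
	free(load_blob_old("old style, shown only for contrast"));
	return 0;
}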
@@ -366,9 +373,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
	if (len > F2FS_NAME_LEN)
		return -ERANGE;

-	base_addr = read_all_xattrs(inode, ipage);
-	if (!base_addr)
-		return -ENOMEM;
+	error = read_all_xattrs(inode, ipage, &base_addr);
+	if (error)
+		return error;

	entry = __find_xattr(base_addr, index, len, name);
	if (IS_XATTR_LAST_ENTRY(entry)) {
@@ -402,9 +409,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
	int error = 0;
	size_t rest = buffer_size;

-	base_addr = read_all_xattrs(inode, NULL);
-	if (!base_addr)
-		return -ENOMEM;
+	error = read_all_xattrs(inode, NULL, &base_addr);
+	if (error)
+		return error;

	list_for_each_xattr(entry, base_addr) {
		const struct xattr_handler *handler =
@@ -463,9 +470,9 @@ static int __f2fs_setxattr(struct inode *inode, int index,
	if (size > MAX_VALUE_LEN(inode))
		return -E2BIG;

-	base_addr = read_all_xattrs(inode, ipage);
-	if (!base_addr)
-		return -ENOMEM;
+	error = read_all_xattrs(inode, ipage, &base_addr);
+	if (error)
+		return error;

	/* find entry with wanted name. */
	here = __find_xattr(base_addr, index, len, name);
@@ -548,6 +555,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
		!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
		f2fs_set_encrypted_inode(inode);
	f2fs_mark_inode_dirty_sync(inode);
+	if (!error && S_ISDIR(inode->i_mode))
+		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
	kzfree(base_addr);
	return error;
...
@@ -100,6 +100,7 @@ struct f2fs_super_block {
/*
 * For checkpoint
 */
+#define CP_CRC_RECOVERY_FLAG	0x00000040
#define CP_FASTBOOT_FLAG	0x00000020
#define CP_FSCK_FLAG		0x00000010
#define CP_ERROR_FLAG		0x00000008
...
@@ -58,16 +58,12 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO))

-#define show_bio_type(op, op_flags)	show_bio_op(op),		\
-		show_bio_op_flags(op_flags), show_bio_extra(op_flags)
+#define show_bio_type(op_flags)	show_bio_op_flags(op_flags),		\
+						show_bio_extra(op_flags)

-#define show_bio_op(op)							\
-	__print_symbolic(op,						\
-		{ READ,		"READ" },				\
-		{ WRITE,	"WRITE" })
-
#define show_bio_op_flags(flags)					\
	__print_symbolic(F2FS_BIO_FLAG_MASK(flags),			\
+		{ 0,		"WRITE" },				\
		{ REQ_RAHEAD,	"READAHEAD" },				\
		{ READ_SYNC,	"READ_SYNC" },				\
		{ WRITE_SYNC,	"WRITE_SYNC" },				\
@@ -754,12 +750,12 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
	),

	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-		"oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s",
+		"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
		show_dev_ino(__entry),
		(unsigned long)__entry->index,
		(unsigned long long)__entry->old_blkaddr,
		(unsigned long long)__entry->new_blkaddr,
-		show_bio_type(__entry->op, __entry->op_flags),
+		show_bio_type(__entry->op_flags),
		show_block_type(__entry->type))
);

@@ -806,9 +802,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
		__entry->size = bio->bi_iter.bi_size;
	),

-	TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u",
+	TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
		show_dev(__entry),
-		show_bio_type(__entry->op, __entry->op_flags),
+		show_bio_type(__entry->op_flags),
		show_block_type(__entry->type),
		(unsigned long long)__entry->sector,
		__entry->size)
...