Commit 9ab97aea authored by Linus Torvalds

Merge tag 'f2fs-for-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've focused on bug fixes since Pixel devices have
  been shipping with f2fs. Some of them are related to hardware
  encryption support, which is not actually an issue in mainline, but
  merging them is worthwhile in order to avoid potential bugs.

  Enhancements:
   - do GC on sub-sections when the section is large
   - add a flag in ioctl(SHUTDOWN) to trigger fsck for QA
   - use kvmalloc() in order to give another chance to avoid ENOMEM

  Bug fixes:
   - fix accessing memory boundaries in a malformed image
   - GC gives stale unencrypted block
   - GC counts in large sections
   - detect idle time more precisely
   - block allocation of DIO writes
   - race conditions between write_begin and write_checkpoint
   - allow GCs for node segments via ioctl()

  There are various clean-ups and minor bug fixes as well"

* tag 'f2fs-for-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (43 commits)
  f2fs: sanity check of xattr entry size
  f2fs: fix use-after-free issue when accessing sbi->stat_info
  f2fs: check PageWriteback flag for ordered case
  f2fs: fix validation of the block count in sanity_check_raw_super
  f2fs: fix missing unlock(sbi->gc_mutex)
  f2fs: fix to dirty inode synchronously
  f2fs: clean up structure extent_node
  f2fs: fix block address for __check_sit_bitmap
  f2fs: fix sbi->extent_list corruption issue
  f2fs: clean up checkpoint flow
  f2fs: flush stale issued discard candidates
  f2fs: correct wrong spelling, issing_*
  f2fs: use kvmalloc, if kmalloc is failed
  f2fs: remove redundant comment of unused wio_mutex
  f2fs: fix to reorder set_page_dirty and wait_on_page_writeback
  f2fs: clear PG_writeback if IPU failed
  f2fs: add an ioctl() to explicitly trigger fsck later
  f2fs: avoid frequent costly fsck triggers
  f2fs: fix m_may_create to make OPU DIO write correctly
  f2fs: fix to update new block address correctly for OPU
  ...
parents 19530313 64beba05
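
One user-visible addition in this pull is the new shutdown mode that asks f2fs to trigger fsck (F2FS_GOING_DOWN_NEED_FSCK, value 0x4, added in the f2fs.h and file.c hunks below). The following minimal userspace sketch shows how it could be exercised; the F2FS_IOC_SHUTDOWN definition used here is assumed to match the kernel's f2fs headers and is not part of this diff, so verify it against your tree before relying on it.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#ifndef F2FS_IOC_SHUTDOWN
#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)	/* assumed: same encoding as fs/f2fs/f2fs.h */
#endif
#define F2FS_GOING_DOWN_NEED_FSCK	0x4			/* new mode from this series */

int main(int argc, char **argv)
{
	__u32 mode = F2FS_GOING_DOWN_NEED_FSCK;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <f2fs mountpoint>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* flag the filesystem for fsck and write a checkpoint; this mode does not stop I/O */
	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &mode) < 0) {
		perror("F2FS_IOC_SHUTDOWN");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}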
@@ -92,6 +92,15 @@ Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
 Description:
 		Controls the number of trials to find a victim segment.
+What:		/sys/fs/f2fs/<disk>/migration_granularity
+Date:		October 2018
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		Controls the migration granularity of garbage collection on a
+		large section: GC may move only part of a section's segments
+		in one GC cycle, spreading the heavy GC overhead across
+		multiple lightweight cycles.
 What:		/sys/fs/f2fs/<disk>/dir_level
 Date:		March 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
...
@@ -160,7 +160,7 @@ static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
 	return (void *)f2fs_acl;
 fail:
-	kfree(f2fs_acl);
+	kvfree(f2fs_acl);
 	return ERR_PTR(-EINVAL);
 }
@@ -190,7 +190,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
 		acl = NULL;
 	else
 		acl = ERR_PTR(retval);
-	kfree(value);
+	kvfree(value);
 	return acl;
 }
@@ -240,7 +240,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
 	error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
-	kfree(value);
+	kvfree(value);
 	if (!error)
 		set_cached_acl(inode, type, acl);
@@ -352,12 +352,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
 		return PTR_ERR(p);
 	clone = f2fs_acl_clone(p, GFP_NOFS);
-	if (!clone)
-		goto no_mem;
+	if (!clone) {
+		ret = -ENOMEM;
+		goto release_acl;
+	}
 	ret = f2fs_acl_create_masq(clone, mode);
 	if (ret < 0)
-		goto no_mem_clone;
+		goto release_clone;
 	if (ret == 0)
 		posix_acl_release(clone);
@@ -371,11 +373,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
 	return 0;
-no_mem_clone:
+release_clone:
 	posix_acl_release(clone);
-no_mem:
+release_acl:
 	posix_acl_release(p);
-	return -ENOMEM;
+	return ret;
 }
 int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
...
@@ -44,7 +44,7 @@ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 		cond_resched();
 		goto repeat;
 	}
-	f2fs_wait_on_page_writeback(page, META, true);
+	f2fs_wait_on_page_writeback(page, META, true, true);
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	return page;
@@ -370,9 +370,8 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 				goto continue_unlock;
 			}
-			f2fs_wait_on_page_writeback(page, META, true);
+			f2fs_wait_on_page_writeback(page, META, true, true);
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
@@ -911,7 +910,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_put_page(cp1, 1);
 	f2fs_put_page(cp2, 1);
 fail_no_cp:
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 	return -EINVAL;
 }
@@ -1290,11 +1289,11 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	int err;
+	f2fs_wait_on_page_writeback(page, META, true, true);
 	memcpy(page_address(page), src, PAGE_SIZE);
-	set_page_dirty(page);
-	f2fs_wait_on_page_writeback(page, META, true);
-	f2fs_bug_on(sbi, PageWriteback(page));
+	set_page_dirty(page);
 	if (unlikely(!clear_page_dirty_for_io(page)))
 		f2fs_bug_on(sbi, 1);
@@ -1328,11 +1327,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	int err;
 	/* Flush all the NAT/SIT pages */
-	while (get_pages(sbi, F2FS_DIRTY_META)) {
-		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-		if (unlikely(f2fs_cp_error(sbi)))
-			break;
-	}
+	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 	/*
 	 * modify checkpoint
@@ -1405,14 +1402,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		for (i = 0; i < nm_i->nat_bits_blocks; i++)
 			f2fs_update_meta_page(sbi, nm_i->nat_bits +
 					(i << F2FS_BLKSIZE_BITS), blk + i);
-		/* Flush all the NAT BITS pages */
-		while (get_pages(sbi, F2FS_DIRTY_META)) {
-			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
-							FS_CP_META_IO);
-			if (unlikely(f2fs_cp_error(sbi)))
-				break;
-		}
 	}
 	/* write out checkpoint buffer at block 0 */
@@ -1448,6 +1437,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	/* Here, we have one bio having CP pack except cp pack 2 page */
 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 	/* wait for previous submitted meta pages writeback */
 	f2fs_wait_on_all_pages_writeback(sbi);
@@ -1465,7 +1456,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	 * invalidate intermediate page cache borrowed from meta inode
 	 * which are used for migration of encrypted inode's blocks.
 	 */
-	if (f2fs_sb_has_encrypt(sbi->sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		invalidate_mapping_pages(META_MAPPING(sbi),
 				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
...
This diff is collapsed.
@@ -53,6 +53,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->vw_cnt = atomic_read(&sbi->vw_cnt);
 	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
 	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
+	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
+	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
 	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
 	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
 	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
@@ -62,7 +64,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 		si->nr_flushed =
 			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
 		si->nr_flushing =
-			atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
 		si->flush_list_empty =
 			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
 	}
@@ -70,7 +72,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 		si->nr_discarded =
 			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
 		si->nr_discarding =
-			atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
 		si->nr_discard_cmd =
 			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
 		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
@@ -197,7 +199,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE;
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
 	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
@@ -374,6 +376,8 @@ static int stat_show(struct seq_file *s, void *v)
 		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
 				si->ext_tree, si->zombie_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
+		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
+			   si->nr_dio_read, si->nr_dio_write);
 		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
 			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
 		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
@@ -444,18 +448,7 @@ static int stat_show(struct seq_file *s, void *v)
 	return 0;
 }
-static int stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, stat_show, inode->i_private);
-}
-
-static const struct file_operations stat_fops = {
-	.owner = THIS_MODULE,
-	.open = stat_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stat);
 int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
@@ -510,7 +503,7 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 	list_del(&si->stat_list);
 	mutex_unlock(&f2fs_stat_mutex);
-	kfree(si);
+	kvfree(si);
 }
 int __init f2fs_create_root_stats(void)
...
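
As an aside on the debug.c hunk above: DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the same open helper and file_operations that the removed stat_open()/stat_fops boilerplate spelled out by hand, roughly as below (paraphrased from the seq_file header for illustration; this expansion is not part of the diff itself):

static int stat_open(struct inode *inode, struct file *file)
{
	/* generated by DEFINE_SHOW_ATTRIBUTE(stat): wire stat_show() into single_open() */
	return single_open(file, stat_show, inode->i_private);
}

static const struct file_operations stat_fops = {
	.owner		= THIS_MODULE,
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

So the change is purely a boilerplate reduction; the debugfs file behaves the same as before.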
@@ -293,7 +293,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 {
 	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, type, true);
+	f2fs_wait_on_page_writeback(page, type, true, true);
 	de->ino = cpu_to_le32(inode->i_ino);
 	set_de_type(de, inode->i_mode);
 	set_page_dirty(page);
@@ -307,7 +307,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
 	struct f2fs_inode *ri;
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	/* copy name info. to this inode page */
 	ri = F2FS_INODE(ipage);
@@ -550,7 +550,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
 		++level;
 	goto start;
 add_dentry:
-	f2fs_wait_on_page_writeback(dentry_page, DATA, true);
+	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
 	if (inode) {
 		down_write(&F2FS_I(inode)->i_sem);
@@ -705,7 +705,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 		return f2fs_delete_inline_entry(dentry, page, dir, inode);
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	dentry_blk = page_address(page);
 	bit_pos = dentry - dentry_blk->dentry;
@@ -808,6 +808,17 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
+		/* check memory boundary before moving forward */
+		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+		if (unlikely(bit_pos > d->max)) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: corrupted namelen=%d, run fsck to fix.",
+				__func__, le16_to_cpu(de->name_len));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			err = -EINVAL;
+			goto out;
+		}
+
 		if (f2fs_encrypted_inode(d->inode)) {
 			int save_len = fstr->len;
@@ -830,7 +841,6 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 		if (readdir_ra)
 			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
-		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 		ctx->pos = start_pos + bit_pos;
 	}
 out:
...
@@ -67,7 +67,7 @@ struct f2fs_fault_info {
 	unsigned int inject_type;
 };
-extern char *f2fs_fault_name[FAULT_MAX];
+extern const char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
 #endif
@@ -152,12 +152,13 @@ struct f2fs_mount_info {
 #define F2FS_FEATURE_VERITY		0x0400	/* reserved */
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
-#define F2FS_HAS_FEATURE(sb, mask)					\
-	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
-#define F2FS_SET_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
-#define F2FS_CLEAR_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
+#define __F2FS_HAS_FEATURE(raw_super, mask)				\
+	((raw_super->feature & cpu_to_le32(mask)) != 0)
+#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
+#define F2FS_SET_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature |= cpu_to_le32(mask))
+#define F2FS_CLEAR_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
 /*
  * Default values for user and/or group using reserved blocks
@@ -284,7 +285,7 @@ struct discard_cmd {
 	struct block_device *bdev;	/* bdev */
 	unsigned short ref;		/* reference count */
 	unsigned char state;		/* state */
-	unsigned char issuing;		/* issuing discard */
+	unsigned char queued;		/* queued discard */
 	int error;			/* bio error */
 	spinlock_t lock;		/* for state/bio_ref updating */
 	unsigned short bio_ref;		/* bio reference count */
@@ -326,7 +327,7 @@ struct discard_cmd_control {
 	unsigned int undiscard_blks;	/* # of undiscard blocks */
 	unsigned int next_pos;		/* next discard position */
 	atomic_t issued_discard;	/* # of issued discard */
-	atomic_t issing_discard;	/* # of issing discard */
+	atomic_t queued_discard;	/* # of queued discard */
 	atomic_t discard_cmd_cnt;	/* # of cached cmd count */
 	struct rb_root_cached root;	/* root of discard rb-tree */
 	bool rbtree_check;		/* config for consistence check */
@@ -416,6 +417,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
 #define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
 #define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
 #define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -557,16 +559,8 @@ struct extent_info {
 };
 struct extent_node {
-	struct rb_node rb_node;
-	union {
-		struct {
-			unsigned int fofs;
-			unsigned int len;
-			u32 blk;
-		};
-		struct extent_info ei;	/* extent info */
-	};
+	struct rb_node rb_node;		/* rb node located in rb-tree */
+	struct extent_info ei;		/* extent info */
 	struct list_head list;		/* node in global extent list of sbi */
 	struct extent_tree *et;		/* extent tree pointer */
 };
@@ -601,6 +595,7 @@ struct f2fs_map_blocks {
 	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
 	pgoff_t *m_next_extent;		/* point to next possible extent */
 	int m_seg_type;
+	bool m_may_create;		/* indicate it is from write path */
 };
 /* for flag in get_data_block */
@@ -889,7 +884,7 @@ struct flush_cmd_control {
 	struct task_struct *f2fs_issue_flush;	/* flush thread */
 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
 	atomic_t issued_flush;			/* # of issued flushes */
-	atomic_t issing_flush;			/* # of issing flushes */
+	atomic_t queued_flush;			/* # of queued flushes */
 	struct llist_head issue_list;		/* list for command issue */
 	struct llist_node *dispatch_list;	/* list for command dispatch */
 };
@@ -956,6 +951,8 @@ enum count_type {
 	F2FS_RD_DATA,
 	F2FS_RD_NODE,
 	F2FS_RD_META,
+	F2FS_DIO_WRITE,
+	F2FS_DIO_READ,
 	NR_COUNT_TYPE,
 };
@@ -1170,8 +1167,6 @@ struct f2fs_sb_info {
 	/* for bio operations */
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
-	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
-						/* bio ordering for NODE/DATA */
 	/* keep migration IO order for LFS mode */
 	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
@@ -1263,6 +1258,7 @@ struct f2fs_sb_info {
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
 	unsigned int gc_mode;			/* current GC state */
+	unsigned int next_victim_seg[2];	/* next segment in victim section */
 	/* for skip statistic */
 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
@@ -1272,6 +1268,8 @@ struct f2fs_sb_info {
 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
+	/* migration granularity of garbage collection, unit: segment */
+	unsigned int migration_granularity;
 	/*
 	 * for stat information.
@@ -1330,6 +1328,13 @@ struct f2fs_sb_info {
 	__u32 s_chksum_seed;
 };
+struct f2fs_private_dio {
+	struct inode *inode;
+	void *orig_private;
+	bio_end_io_t *orig_end_io;
+	bool write;
+};
+
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)					\
 	printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n",	\
@@ -1608,12 +1613,16 @@ static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
 {
 	unsigned long flags;
-	set_sbi_flag(sbi, SBI_NEED_FSCK);
+	/*
+	 * In order to re-enable nat_bits we need to call fsck.f2fs by
+	 * set_sbi_flag(sbi, SBI_NEED_FSCK). But it may give huge cost,
+	 * so let's rely on regular fsck or unclean shutdown.
+	 */
 	if (lock)
 		spin_lock_irqsave(&sbi->cp_lock, flags);
 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
-	kfree(NM_I(sbi)->nat_bits);
+	kvfree(NM_I(sbi)->nat_bits);
 	NM_I(sbi)->nat_bits = NULL;
 	if (lock)
 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
@@ -2146,7 +2155,11 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
 {
 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
-		get_pages(sbi, F2FS_WB_CP_DATA))
+		get_pages(sbi, F2FS_WB_CP_DATA) ||
+		get_pages(sbi, F2FS_DIO_READ) ||
+		get_pages(sbi, F2FS_DIO_WRITE) ||
+		atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
+		atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
 		return false;
 	return f2fs_time_over(sbi, type);
 }
@@ -2370,6 +2383,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
 	case FI_NEW_INODE:
 		if (set)
 			return;
+		/* fall through */
 	case FI_DATA_EXIST:
 	case FI_INLINE_DOTS:
 	case FI_PIN_FILE:
@@ -2672,22 +2686,37 @@ static inline bool is_dot_dotdot(const struct qstr *str)
 static inline bool f2fs_may_extent_tree(struct inode *inode)
 {
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	if (!test_opt(sbi, EXTENT_CACHE) ||
 			is_inode_flag_set(inode, FI_NO_EXTENT))
 		return false;
+	/*
+	 * for recovered files during mount do not create extents
+	 * if shrinker is not registered.
+	 */
+	if (list_empty(&sbi->s_list))
+		return false;
 	return S_ISREG(inode->i_mode);
 }
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 		f2fs_show_injection_info(FAULT_KMALLOC);
 		return NULL;
 	}
-	return kmalloc(size, flags);
+	ret = kmalloc(size, flags);
+	if (ret)
+		return ret;
+	return kvmalloc(size, flags);
 }
 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -2762,6 +2791,8 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
 	spin_unlock(&sbi->iostat_lock);
 }
+#define __is_large_section(sbi)	((sbi)->segs_per_sec > 1)
+
 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
 				(!is_read_io(fio->op) || fio->is_meta))
@@ -3007,7 +3038,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 			struct f2fs_summary *sum, int type,
 			struct f2fs_io_info *fio, bool add_list);
 void f2fs_wait_on_page_writeback(struct page *page,
-			enum page_type type, bool ordered);
+			enum page_type type, bool ordered, bool locked);
 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 			block_t len);
@@ -3147,6 +3178,7 @@ struct f2fs_stat_info {
 	int total_count, utilization;
 	int bg_gc, nr_wb_cp_data, nr_wb_data;
 	int nr_rd_data, nr_rd_node, nr_rd_meta;
+	int nr_dio_read, nr_dio_write;
 	unsigned int io_skip_bggc, other_skip_bggc;
 	int nr_flushing, nr_flushed, flush_list_empty;
 	int nr_discarding, nr_discarded;
@@ -3459,9 +3491,9 @@ static inline bool f2fs_post_read_required(struct inode *inode)
 }
 #define F2FS_FEATURE_FUNCS(name, flagname) \
-static inline int f2fs_sb_has_##name(struct super_block *sb) \
+static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
 { \
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
+	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
 }
 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
@@ -3491,7 +3523,7 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
 {
-	return f2fs_sb_has_blkzoned(sbi->sb);
+	return f2fs_sb_has_blkzoned(sbi);
 }
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
@@ -3566,7 +3598,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
 	 * for blkzoned device, fallback direct IO to buffered IO, so
 	 * all IOs can be serialized by log-structured write.
 	 */
-	if (f2fs_sb_has_blkzoned(sbi->sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		return true;
 	if (test_opt(sbi, LFS) && (rw == WRITE) &&
 				block_unaligned_IO(inode, iocb, iter))
@@ -3589,7 +3621,7 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 {
 #ifdef CONFIG_QUOTA
-	if (f2fs_sb_has_quota_ino(sbi->sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		return true;
 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
...
@@ -82,7 +82,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	}
 	/* fill the page */
-	f2fs_wait_on_page_writeback(page, DATA, false);
+	f2fs_wait_on_page_writeback(page, DATA, false, true);
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -216,6 +216,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 	trace_f2fs_sync_file_enter(inode);
+	if (S_ISDIR(inode->i_mode))
+		goto go_write;
 	/* if fdatasync is triggered, let's do in-place-update */
 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
 		set_inode_flag(inode, FI_NEED_IPU);
@@ -575,7 +578,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
 	if (IS_ERR(page))
 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
 truncate_out:
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, offset, PAGE_SIZE - offset);
 	/* An encrypted inode should have a key and truncate the last page. */
@@ -696,7 +699,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
 	unsigned int flags;
 	if (f2fs_has_extra_attr(inode) &&
-			f2fs_sb_has_inode_crtime(inode->i_sb) &&
+			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		stat->result_mask |= STATX_BTIME;
 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
@@ -892,7 +895,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
 	if (IS_ERR(page))
 		return PTR_ERR(page);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, start, len);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
@@ -1496,7 +1499,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
-			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
+			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+			.m_may_create = true };
 	pgoff_t pg_end;
 	loff_t new_size = i_size_read(inode);
 	loff_t off_end;
@@ -1681,7 +1685,7 @@ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
 	inode->i_ctime = current_time(inode);
 	f2fs_set_inode_flags(inode);
-	f2fs_mark_inode_dirty_sync(inode, false);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
@@ -1962,6 +1966,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		f2fs_stop_checkpoint(sbi, false);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
+	case F2FS_GOING_DOWN_NEED_FSCK:
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		/* do checkpoint only */
+		ret = f2fs_sync_fs(sb, 1);
+		if (ret)
+			goto out;
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -2030,7 +2041,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
 		return -EOPNOTSUPP;
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -2040,7 +2051,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
 {
-	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
 		return -EOPNOTSUPP;
 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
 }
@@ -2051,7 +2062,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int err;
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(sbi))
 		return -EOPNOTSUPP;
 	err = mnt_want_write_file(filp);
@@ -2155,7 +2166,7 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
 	}
 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
-	range.start += sbi->blocks_per_seg;
+	range.start += BLKS_PER_SEC(sbi);
 	if (range.start <= end)
 		goto do_more;
 out:
@@ -2197,7 +2208,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
-					.m_seg_type = NO_CHECK_TYPE };
+					.m_seg_type = NO_CHECK_TYPE,
+					.m_may_create = false };
 	struct extent_info ei = {0, 0, 0};
 	pgoff_t pg_start, pg_end, next_pgofs;
 	unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -2560,7 +2572,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 		return -EFAULT;
 	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
-			sbi->segs_per_sec != 1) {
+			__is_large_section(sbi)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Can't flush %u in %d for segs_per_sec %u != 1\n",
 				range.dev_num, sbi->s_ndevs,
@@ -2635,12 +2647,11 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
 	struct inode *inode = file_inode(filp);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct super_block *sb = sbi->sb;
 	struct page *ipage;
 	kprojid_t kprojid;
 	int err;
-	if (!f2fs_sb_has_project_quota(sb)) {
+	if (!f2fs_sb_has_project_quota(sbi)) {
 		if (projid != F2FS_DEF_PROJID)
 			return -EOPNOTSUPP;
 		else
@@ -2757,7 +2768,7 @@ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
 	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
 					F2FS_FL_USER_VISIBLE);
-	if (f2fs_sb_has_project_quota(inode->i_sb))
+	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
 		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
 							fi->i_projid);
@@ -2932,6 +2943,7 @@ int f2fs_precache_extents(struct inode *inode)
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = &m_next_extent;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 	end = F2FS_I_SB(inode)->max_file_blocks;
 	while (map.m_lblk < end) {
...
@@ -142,7 +142,7 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(gc_th->f2fs_gc_task)) {
 		err = PTR_ERR(gc_th->f2fs_gc_task);
-		kfree(gc_th);
+		kvfree(gc_th);
 		sbi->gc_thread = NULL;
 	}
 out:
@@ -155,7 +155,7 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 	if (!gc_th)
 		return;
 	kthread_stop(gc_th->f2fs_gc_task);
-	kfree(gc_th);
+	kvfree(gc_th);
 	sbi->gc_thread = NULL;
 }
@@ -323,8 +323,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 	p.min_cost = get_max_cost(sbi, &p);
 	if (*result != NULL_SEGNO) {
-		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
-			get_valid_blocks(sbi, *result, false) &&
+		if (get_valid_blocks(sbi, *result, false) &&
 			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 			p.min_segno = *result;
 		goto out;
@@ -333,6 +332,22 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 	if (p.max_search == 0)
 		goto out;
+	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
+		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[BG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+		if (gc_type == FG_GC &&
+				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[FG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+	}
 	last_victim = sm->last_victim[p.gc_mode];
 	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 		p.min_segno = check_bg_victims(sbi);
@@ -395,6 +410,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 	}
 	if (p.min_segno != NULL_SEGNO) {
 got_it:
+		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+got_result:
 		if (p.alloc_mode == LFS) {
 			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 			if (gc_type == FG_GC)
@@ -402,13 +419,13 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 			else
 				set_bit(secno, dirty_i->victim_secmap);
 		}
-		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
-	}
-out:
-	if (p.min_segno != NULL_SEGNO)
 		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 				sbi->cur_victim_sec,
 				prefree_segments(sbi), free_segments(sbi));
+	}
+out:
 	mutex_unlock(&dirty_i->seglist_lock);
 	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
@@ -658,6 +675,14 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+	/*
+	 * don't cache encrypted data into meta inode until previous dirty
+	 * data were writebacked to avoid racing between GC and flush.
+	 */
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
 					dn.data_blkaddr,
 					FGP_LOCK | FGP_CREAT, GFP_NOFS);
@@ -743,7 +768,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	 * don't cache encrypted data into meta inode until previous dirty
 	 * data were writebacked to avoid racing between GC and flush.
 	 */
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
 	if (err)
@@ -802,8 +829,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	}
 write_page:
+	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
 	set_page_dirty(fio.encrypted_page);
-	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -811,7 +838,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	ClearPageError(page);
 	/* allocate block address */
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
@@ -897,8 +924,9 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
 	bool is_dirty = PageDirty(page);
 retry:
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	set_page_dirty(page);
-	f2fs_wait_on_page_writeback(page, DATA, true);
 	if (clear_page_dirty_for_io(page)) {
 		inode_dec_dirty_pages(inode);
 		f2fs_remove_dirty_inode(inode);
@@ -1093,15 +1121,18 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 	struct blk_plug plug;
 	unsigned int segno = start_segno;
 	unsigned int end_segno = start_segno + sbi->segs_per_sec;
-	int seg_freed = 0;
+	int seg_freed = 0, migrated = 0;
 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
 						SUM_TYPE_DATA : SUM_TYPE_NODE;
 	int submitted = 0;
+	if (__is_large_section(sbi))
+		end_segno = rounddown(end_segno, sbi->segs_per_sec);
 	/* readahead multi ssa blocks those have contiguous address */
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
-					sbi->segs_per_sec, META_SSA, true);
+					end_segno - segno, META_SSA, true);
 	/* reference all summary page */
 	while (segno < end_segno) {
@@ -1130,10 +1161,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 				GET_SUM_BLOCK(sbi, segno));
 		f2fs_put_page(sum_page, 0);
-		if (get_valid_blocks(sbi, segno, false) == 0 ||
-				!PageUptodate(sum_page) ||
-				unlikely(f2fs_cp_error(sbi)))
-			goto next;
+		if (get_valid_blocks(sbi, segno, false) == 0)
+			goto freed;
+		if (__is_large_section(sbi) &&
+				migrated >= sbi->migration_granularity)
+			goto skip;
+		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
+			goto skip;
 		sum = page_address(sum_page);
 		if (type != GET_SUM_TYPE((&sum->footer))) {
@@ -1141,7 +1175,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			goto next;
+			goto skip;
 		}
 		/*
@@ -1160,10 +1194,15 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 		stat_inc_seg_count(sbi, type, gc_type);
+freed:
 		if (gc_type == FG_GC &&
 				get_valid_blocks(sbi, segno, false) == 0)
 			seg_freed++;
-next:
+		migrated++;
+		if (__is_large_section(sbi) && segno + 1 < end_segno)
+			sbi->next_victim_seg[gc_type] = segno + 1;
+skip:
 		f2fs_put_page(sum_page, 0);
 	}
@@ -1307,7 +1346,7 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+	if (sbi->s_ndevs && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
@@ -72,7 +72,7 @@ void f2fs_truncate_inline_inode(struct inode *inode,
 	addr = inline_data_addr(inode, ipage);
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
 	set_page_dirty(ipage);
@@ -161,7 +161,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
 	f2fs_outplace_write_data(dn, &fio);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	if (dirty) {
 		inode_dec_dirty_pages(dn->inode);
 		f2fs_remove_dirty_inode(dn->inode);
@@ -236,7 +236,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
 	f2fs_bug_on(F2FS_I_SB(inode), page->index);
-	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
 	src_addr = kmap_atomic(page);
 	dst_addr = inline_data_addr(inode, dn.inode_page);
 	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
@@ -277,7 +277,7 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
 		ipage = f2fs_get_node_page(sbi, inode->i_ino);
 		f2fs_bug_on(sbi, IS_ERR(ipage));
-		f2fs_wait_on_page_writeback(ipage, NODE, true);
+		f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 		src_addr = inline_data_addr(inode, npage);
 		dst_addr = inline_data_addr(inode, ipage);
@@ -391,7 +391,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
 		goto out;
 	}
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	dentry_blk = page_address(page);
@@ -501,18 +501,18 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return 0;
 recover:
 	lock_page(ipage);
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 	f2fs_i_depth_write(dir, 0);
 	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
 	set_page_dirty(ipage);
 	f2fs_put_page(ipage, 1);
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return err;
 }
@@ -565,7 +565,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 		}
 	}
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	name_hash = f2fs_dentry_hash(new_name, NULL);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
@@ -597,7 +597,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
 	int i;
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 	inline_dentry = inline_data_addr(dir, page);
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
...
@@ -103,7 +103,7 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
 	while (start < end) {
 		if (*start++) {
-			f2fs_wait_on_page_writeback(ipage, NODE, true);
+			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 			set_inode_flag(inode, FI_DATA_EXIST);
 			set_raw_inline(inode, F2FS_INODE(ipage));
@@ -118,7 +118,7 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
 {
 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
 
-	if (!f2fs_sb_has_inode_chksum(sbi->sb))
+	if (!f2fs_sb_has_inode_chksum(sbi))
 		return false;
 
 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
@@ -218,7 +218,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		return false;
 	}
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)
 			&& !f2fs_has_extra_attr(inode)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -228,7 +228,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 	}
 
 	if (f2fs_has_extra_attr(inode) &&
-			!f2fs_sb_has_extra_attr(sbi->sb)) {
+			!f2fs_sb_has_extra_attr(sbi)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"%s: inode (ino=%lx) is with extra_attr, "
@@ -340,7 +340,7 @@ static int do_read_inode(struct inode *inode)
 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
 			le16_to_cpu(ri->i_extra_isize) : 0;
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
 	} else if (f2fs_has_inline_xattr(inode) ||
 			f2fs_has_inline_dentry(inode)) {
@@ -390,14 +390,14 @@ static int do_read_inode(struct inode *inode)
 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
 	else
 		i_projid = F2FS_DEF_PROJID;
 	fi->i_projid = make_kprojid(&init_user_ns, i_projid);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
@@ -497,7 +497,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 	struct f2fs_inode *ri;
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
-	f2fs_wait_on_page_writeback(node_page, NODE, true);
+	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 	set_page_dirty(node_page);
 
 	f2fs_inode_synced(inode);
@@ -542,11 +542,11 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 	if (f2fs_has_extra_attr(inode)) {
 		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
 
-		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
+		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
 			ri->i_inline_xattr_size =
 				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
 
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_projid)) {
 			projid_t i_projid;
@@ -556,7 +556,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 			ri->i_projid = cpu_to_le32(i_projid);
 		}
 
-		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_crtime)) {
 			ri->i_crtime =
...
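
A change that repeats across inode.c, namei.c, node.c, recovery.c and sysfs.c in this merge: the f2fs_sb_has_*() feature tests now take the f2fs_sb_info instead of the VFS super_block, shortening every call site from f2fs_sb_has_xxx(sbi->sb) to f2fs_sb_has_xxx(sbi). One plausible shape of the updated helper is sketched below; the field access and the F2FS_FEATURE_PRJQUOTA flag usage are assumptions for illustration, not copied from f2fs.h:

/* Sketch of an sbi-based feature test; details assumed, not verbatim. */
static inline bool f2fs_sb_has_project_quota(struct f2fs_sb_info *sbi)
{
	/* the on-disk feature word lives in the raw superblock */
	return le32_to_cpu(sbi->raw_super->feature) & F2FS_FEATURE_PRJQUOTA;
}
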
@@ -61,7 +61,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 		goto fail;
 	}
 
-	if (f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_sb_has_project_quota(sbi) &&
 		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
 		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
 	else
@@ -79,7 +79,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
-	if (f2fs_sb_has_extra_attr(sbi->sb)) {
+	if (f2fs_sb_has_extra_attr(sbi)) {
 		set_inode_flag(inode, FI_EXTRA_ATTR);
 		F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
 	}
@@ -92,7 +92,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(inode, FI_INLINE_DENTRY);
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
 		if (f2fs_has_inline_xattr(inode))
 			xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
@@ -635,7 +635,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
 	f2fs_handle_failed_inode(inode);
 out_free_encrypted_link:
 	if (disk_link.name != (unsigned char *)symname)
-		kfree(disk_link.name);
+		kvfree(disk_link.name);
 	return err;
 }
...
@@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info ni;
 	int err;
+	pgoff_t index;
 
 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
 	if (err)
@@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
 	clear_node_page_dirty(dn->node_page);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
+	index = dn->node_page->index;
 	f2fs_put_page(dn->node_page, 1);
 
 	invalidate_mapping_pages(NODE_MAPPING(sbi),
-			dn->node_page->index, dn->node_page->index);
+			index, index);
 
 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
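
The second hunk above is a use-after-free fix: the old code passed dn->node_page->index to invalidate_mapping_pages() after f2fs_put_page() had already dropped the reference to that page. Caching the index in a local before releasing the page removes the ordering hazard. A self-contained illustration of the same save-before-release pattern (plain C, nothing f2fs-specific; the names are made up for the demo):

#include <stdio.h>
#include <stdlib.h>

struct node_page {
	unsigned long index;
};

static void put_page(struct node_page *page)
{
	free(page);		/* may be the last reference */
}

static void invalidate(unsigned long start, unsigned long end)
{
	printf("invalidate [%lu, %lu]\n", start, end);
}

int main(void)
{
	struct node_page *page = malloc(sizeof(*page));

	page->index = 42;

	/* Wrong order would be: put_page(page); invalidate(page->index, ...);
	 * which reads freed memory.  Save what you need first:
	 */
	unsigned long index = page->index;

	put_page(page);
	invalidate(index, index);
	return 0;
}
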
@@ -1104,7 +1106,7 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 			lock_page(page);
 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
-			f2fs_wait_on_page_writeback(page, NODE, true);
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 			set_page_dirty(page);
 			unlock_page(page);
@@ -1232,7 +1234,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 	new_ni.version = 0;
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
 	if (!PageUptodate(page))
@@ -1596,10 +1598,10 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
 			.for_reclaim = 0,
 		};
 
+		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
+
 		set_page_dirty(node_page);
-		f2fs_wait_on_page_writeback(node_page, NODE, true);
 
-		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
 		if (!clear_page_dirty_for_io(node_page)) {
 			err = -EAGAIN;
 			goto out_page;
@@ -1687,8 +1689,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 					goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true);
-			BUG_ON(PageWriteback(page));
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 			set_fsync_mark(page, 0);
 			set_dentry_mark(page, 0);
@@ -1739,7 +1740,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			"Retry to write fsync mark: ino=%u, idx=%lx",
 					ino, last_page->index);
 		lock_page(last_page);
-		f2fs_wait_on_page_writeback(last_page, NODE, true);
+		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
 		set_page_dirty(last_page);
 		unlock_page(last_page);
 		goto retry;
@@ -1820,9 +1821,8 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 				goto lock_node;
 		}
 
-		f2fs_wait_on_page_writeback(page, NODE, true);
-		BUG_ON(PageWriteback(page));
+		f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 		if (!clear_page_dirty_for_io(page))
 			goto continue_unlock;
@@ -1889,7 +1889,7 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 		get_page(page);
 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-		f2fs_wait_on_page_writeback(page, NODE, true);
+		f2fs_wait_on_page_writeback(page, NODE, true, false);
 		if (TestClearPageError(page))
 			ret = -EIO;
@@ -2467,7 +2467,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 	src_addr = inline_xattr_addr(inode, page);
 	inline_size = inline_xattr_size(inode);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(dst_addr, src_addr, inline_size);
 update_inode:
 	f2fs_update_inode(inode, ipage);
@@ -2561,17 +2561,17 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
 		dst->i_extra_isize = src->i_extra_isize;
 
-		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
+		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_inline_xattr_size))
 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
 
-		if (f2fs_sb_has_project_quota(sbi->sb) &&
+		if (f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 								i_projid))
 			dst->i_projid = src->i_projid;
 
-		if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+		if (f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_crtime_nsec)) {
 			dst->i_crtime = src->i_crtime;
@@ -3113,17 +3113,17 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 		for (i = 0; i < nm_i->nat_blocks; i++)
 			kvfree(nm_i->free_nid_bitmap[i]);
-		kfree(nm_i->free_nid_bitmap);
+		kvfree(nm_i->free_nid_bitmap);
 	}
 	kvfree(nm_i->free_nid_count);
 
-	kfree(nm_i->nat_bitmap);
-	kfree(nm_i->nat_bits);
+	kvfree(nm_i->nat_bitmap);
+	kvfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kfree(nm_i->nat_bitmap_mir);
+	kvfree(nm_i->nat_bitmap_mir);
 #endif
 	sbi->nm_info = NULL;
-	kfree(nm_i);
+	kvfree(nm_i);
 }
 
 int __init f2fs_create_node_manager_caches(void)
...
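
The kfree() -> kvfree() conversions here (and in inline.c and namei.c above) pair with this series' switch to kvmalloc()-style allocation when kmalloc() fails: a buffer that may have come from vmalloc() must be released with kvfree(), which dispatches to vfree() or kfree() as appropriate. A minimal sketch of the pairing rule, not a specific f2fs function:

/* Sketch: an allocation that may fall back to vmalloc() needs kvfree(). */
void *buf = kvmalloc(size, GFP_KERNEL);	/* kmalloc first, vmalloc fallback */

if (!buf)
	return -ENOMEM;
/* ... use buf ... */
kvfree(buf);	/* correct for either backing allocator; plain kfree() is not */
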
@@ -361,7 +361,7 @@ static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
 {
 	struct f2fs_node *rn = F2FS_NODE(p);
 
-	f2fs_wait_on_page_writeback(p, NODE, true);
+	f2fs_wait_on_page_writeback(p, NODE, true, true);
 
 	if (i)
 		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
...
@@ -250,7 +250,7 @@ static int recover_inode(struct inode *inode, struct page *page)
 	i_gid_write(inode, le32_to_cpu(raw->i_gid));
 
 	if (raw->i_inline & F2FS_EXTRA_ATTR) {
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
 								i_projid)) {
 			projid_t i_projid;
@@ -539,7 +539,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 		goto out;
 	}
 
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
 	err = f2fs_get_node_info(sbi, dn.nid, &ni);
 	if (err)
...
@@ -333,7 +333,7 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
 	 * In order to get # of valid blocks in a section instantly from many
 	 * segments, f2fs manages two counting structures separately.
 	 */
-	if (use_section && sbi->segs_per_sec > 1)
+	if (use_section && __is_large_section(sbi))
 		return get_sec_entry(sbi, segno)->valid_blocks;
 	else
 		return get_seg_entry(sbi, segno)->valid_blocks;
...
@@ -135,6 +135,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
 	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
 
 	spin_lock(&f2fs_list_lock);
-	list_del(&sbi->s_list);
+	list_del_init(&sbi->s_list);
 	spin_unlock(&f2fs_list_lock);
 }
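
Switching list_del() to list_del_init() matters because list_del() leaves the removed entry's pointers poisoned, so a later list_empty() test on sbi->s_list, or a second removal, would misbehave; list_del_init() re-links the node to itself so both stay safe. A minimal user-space model of the difference (struct list_head and the helpers re-implemented just for this demo, not the kernel's <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_POISON ((struct list_head *)0xdead)

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = LIST_POISON;		/* dangling on purpose, like the kernel */
	e->prev = LIST_POISON;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e;			/* self-linked: still a valid empty list */
	e->prev = e;
}

static bool list_empty(const struct list_head *e)
{
	return e->next == e;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head a, b;

	/* build: head <-> a <-> b <-> head */
	a.prev = &head; a.next = &b;
	b.prev = &a;    b.next = &head;
	head.next = &a; head.prev = &b;

	list_del(&a);		/* a.next/a.prev now point at poison */
	list_del_init(&b);	/* b is re-initialised as an empty list */

	printf("b empty after list_del_init: %d\n", list_empty(&b));
	printf("a.next is poison: %d\n", a.next == LIST_POISON);
	return 0;
}
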
@@ -90,34 +90,34 @@ static ssize_t features_show(struct f2fs_attr *a,
 	if (!sb->s_bdev->bd_part)
 		return snprintf(buf, PAGE_SIZE, "0\n");
 
-	if (f2fs_sb_has_encrypt(sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		len += snprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
-	if (f2fs_sb_has_blkzoned(sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "blkzoned");
-	if (f2fs_sb_has_extra_attr(sb))
+	if (f2fs_sb_has_extra_attr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "extra_attr");
-	if (f2fs_sb_has_project_quota(sb))
+	if (f2fs_sb_has_project_quota(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "projquota");
-	if (f2fs_sb_has_inode_chksum(sb))
+	if (f2fs_sb_has_inode_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_checksum");
-	if (f2fs_sb_has_flexible_inline_xattr(sb))
+	if (f2fs_sb_has_flexible_inline_xattr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "flexible_inline_xattr");
-	if (f2fs_sb_has_quota_ino(sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "quota_ino");
-	if (f2fs_sb_has_inode_crtime(sb))
+	if (f2fs_sb_has_inode_crtime(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_crtime");
-	if (f2fs_sb_has_lost_found(sb))
+	if (f2fs_sb_has_lost_found(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "lost_found");
-	if (f2fs_sb_has_sb_chksum(sb))
+	if (f2fs_sb_has_sb_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "sb_checksum");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -246,6 +246,11 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "migration_granularity")) {
+		if (t == 0 || t > sbi->segs_per_sec)
+			return -EINVAL;
+	}
+
 	if (!strcmp(a->attr.name, "trim_sections"))
 		return -EINVAL;
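
The new store-side check rejects 0 and anything larger than segs_per_sec, so valid values are 1 through the number of segments in a section. The tunable is exposed as /sys/fs/f2fs/<disk>/migration_granularity (documented earlier in this series). Below is a small user-space sketch that writes a value to it; "sdb1" is only a placeholder for whatever <disk> is on your system:

#include <stdio.h>

int main(void)
{
	/* "sdb1" is a placeholder; substitute the mounted f2fs block device. */
	const char *path = "/sys/fs/f2fs/sdb1/migration_granularity";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Move at most one segment of a large section per GC cycle. */
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}
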
@@ -406,6 +411,7 @@ F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
@@ -460,6 +466,7 @@ static struct attribute *f2fs_attrs[] = {
 	ATTR_LIST(min_hot_blocks),
 	ATTR_LIST(min_ssr_sections),
 	ATTR_LIST(max_victim_search),
+	ATTR_LIST(migration_granularity),
 	ATTR_LIST(dir_level),
 	ATTR_LIST(ram_thresh),
 	ATTR_LIST(ra_nid_pages),
...
@@ -288,7 +288,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
 static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				unsigned int index, unsigned int len,
 				const char *name, struct f2fs_xattr_entry **xe,
-				void **base_addr)
+				void **base_addr, int *base_size)
 {
 	void *cur_addr, *txattr_addr, *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
@@ -299,8 +299,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 	if (!size && !inline_size)
 		return -ENODATA;
 
-	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
-			inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
+	*base_size = inline_size + size + XATTR_PADDING_SIZE;
+	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
 	if (!txattr_addr)
 		return -ENOMEM;
@@ -312,8 +312,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 		*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
 						index, len, name);
-		if (*xe)
+		if (*xe) {
+			*base_size = inline_size;
 			goto check;
+		}
 	}
 
 	/* read from xattr node block */
@@ -415,7 +417,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 	}
 
 	f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
-							NODE, true);
+							NODE, true, true);
 	/* no need to use xattr node block */
 	if (hsize <= inline_size) {
 		err = f2fs_truncate_xattr_node(inode);
@@ -439,7 +441,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 			goto in_page_out;
 		}
 		f2fs_bug_on(sbi, new_nid);
-		f2fs_wait_on_page_writeback(xpage, NODE, true);
+		f2fs_wait_on_page_writeback(xpage, NODE, true, true);
 	} else {
 		struct dnode_of_data dn;
 		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -474,6 +476,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	int error = 0;
 	unsigned int size, len;
 	void *base_addr = NULL;
+	int base_size;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -484,7 +487,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
-				&entry, &base_addr);
+				&entry, &base_addr, &base_size);
 	up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
@@ -498,6 +501,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	if (buffer) {
 		char *pval = entry->e_name + entry->e_name_len;
+
+		if (base_size - (pval - (char *)base_addr) < size) {
+			error = -ERANGE;
+			goto out;
+		}
 		memcpy(buffer, pval, size);
 	}
 	error = size;
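
This is the "sanity check of xattr entry size" fix from the shortlog: lookup_all_xattrs() now reports how large the in-memory xattr buffer is (base_size), and f2fs_getxattr() refuses to copy a value that a corrupted entry claims extends past that buffer, returning -ERANGE instead of reading out of bounds. A self-contained model of the same bounds test follows; the names and layout are simplified for illustration and are not the f2fs structures:

#include <stdbool.h>
#include <stdio.h>

/*
 * value_fits(): true only when [value, value + value_len) lies entirely
 * inside the buffer [base, base + base_size).  Mirrors the new
 * "base_size - (pval - base) < size  ->  -ERANGE" check in f2fs_getxattr().
 */
static bool value_fits(const char *base, int base_size,
		       const char *value, int value_len)
{
	return value >= base && (value - base) <= base_size - value_len;
}

int main(void)
{
	char buf[64];
	const char *good = buf + 16;	/* value stored well inside the buffer */
	const char *bad  = buf + 60;	/* claims 16 bytes, only 4 remain */

	printf("good: %d\n", value_fits(buf, sizeof(buf), good, 16));
	printf("bad:  %d\n", value_fits(buf, sizeof(buf), bad, 16));
	return 0;
}
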
...