Commit cfcc0ad4 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-f2fs-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "New features:
   - per-file encryption (e.g., ext4)
   - FALLOC_FL_ZERO_RANGE
   - FALLOC_FL_COLLAPSE_RANGE
   - RENAME_WHITEOUT

  Major enhancements/fixes:
   - recover broken superblocks
   - enhance f2fs_trim_fs with a discard_map
   - fix a race condition on dentry block allocation
   - fix a deadlock during summary operation
   - fix a missing fiemap result

  .. and many minor bug fixes and clean-ups were done"

* tag 'for-f2fs-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (83 commits)
  f2fs: do not trim preallocated blocks when truncating after i_size
  f2fs crypto: add alloc_bounce_page
  f2fs crypto: fix to handle errors likewise ext4
  f2fs: drop the volatile_write flag only
  f2fs: skip committing valid superblock
  f2fs: setting discard option in parse_options()
  f2fs: fix to return exact trimmed size
  f2fs: support FALLOC_FL_INSERT_RANGE
  f2fs: hide common code in f2fs_replace_block
  f2fs: disable the discard option when device doesn't support
  f2fs crypto: remove alloc_page for bounce_page
  f2fs: fix a deadlock for summary page lock vs. sentry_lock
  f2fs crypto: clean up error handling in f2fs_fname_setup_filename
  f2fs crypto: avoid f2fs_inherit_context for symlink
  f2fs crypto: do not set encryption policy for non-directory by ioctl
  f2fs crypto: allow setting encryption policy once
  f2fs crypto: check context consistent for rename2
  f2fs: avoid duplicated code by reusing f2fs_read_end_io
  f2fs crypto: use per-inode tfm structure
  f2fs: recovering broken superblock during mount
  ...
parents a7296b49 3c454145
......@@ -72,6 +72,25 @@ config F2FS_CHECK_FS
If you want to improve the performance, say N.
config F2FS_FS_ENCRYPTION
bool "F2FS Encryption"
depends on F2FS_FS
depends on F2FS_FS_XATTR
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_CTR
select CRYPTO_SHA256
select KEYS
select ENCRYPTED_KEYS
help
Enable encryption of f2fs files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
config F2FS_IO_TRACE
bool "F2FS IO tracer"
depends on F2FS_FS
......
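For reference, a minimal sketch of a kernel .config fragment that turns the new option on; the option names come from the Kconfig hunk above, and the listed crypto algorithms plus KEYS support are pulled in automatically through the select statements:

# hypothetical .config fragment enabling f2fs per-file encryption
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_XATTR=y
CONFIG_F2FS_FS_ENCRYPTION=y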
......@@ -6,3 +6,5 @@ f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
crypto_key.o crypto_fname.o
......@@ -334,51 +334,45 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
struct page *dpage)
{
struct posix_acl *p;
struct posix_acl *clone;
int ret;
*acl = NULL;
*default_acl = NULL;
if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
goto no_acl;
return 0;
p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
if (IS_ERR(p)) {
if (p == ERR_PTR(-EOPNOTSUPP))
goto apply_umask;
return PTR_ERR(p);
if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
*mode &= ~current_umask();
return 0;
}
if (IS_ERR(p))
return PTR_ERR(p);
if (!p)
goto apply_umask;
*acl = f2fs_acl_clone(p, GFP_NOFS);
if (!*acl)
clone = f2fs_acl_clone(p, GFP_NOFS);
if (!clone)
goto no_mem;
ret = f2fs_acl_create_masq(*acl, mode);
ret = f2fs_acl_create_masq(clone, mode);
if (ret < 0)
goto no_mem_clone;
if (ret == 0) {
posix_acl_release(*acl);
*acl = NULL;
}
if (ret == 0)
posix_acl_release(clone);
else
*acl = clone;
if (!S_ISDIR(*mode)) {
if (!S_ISDIR(*mode))
posix_acl_release(p);
*default_acl = NULL;
} else {
else
*default_acl = p;
}
return 0;
apply_umask:
*mode &= ~current_umask();
no_acl:
*default_acl = NULL;
*acl = NULL;
return 0;
no_mem_clone:
posix_acl_release(*acl);
posix_acl_release(clone);
no_mem:
posix_acl_release(p);
return -ENOMEM;
......
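Because the view above interleaves removed and added lines without +/- markers, here is a reconstruction of how f2fs_acl_create() is assumed to read after the patch; it is a sketch assembled from the added lines (the clone-based masquerading and the umask fallback replace the old *acl dereferences and goto labels):

static int f2fs_acl_create(struct inode *dir, umode_t *mode,
		struct posix_acl **default_acl, struct posix_acl **acl,
		struct page *dpage)
{
	struct posix_acl *p;
	struct posix_acl *clone;
	int ret;

	*acl = NULL;
	*default_acl = NULL;

	/* symlinks and non-POSIX-ACL mounts: nothing to inherit */
	if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
		return 0;

	p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
	if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
		/* no default ACL: fall back to the process umask */
		*mode &= ~current_umask();
		return 0;
	}
	if (IS_ERR(p))
		return PTR_ERR(p);

	clone = f2fs_acl_clone(p, GFP_NOFS);
	if (!clone)
		goto no_mem;

	ret = f2fs_acl_create_masq(clone, mode);
	if (ret < 0)
		goto no_mem_clone;

	if (ret == 0)
		posix_acl_release(clone);	/* ACL fully expressed by mode bits */
	else
		*acl = clone;

	if (!S_ISDIR(*mode))
		posix_acl_release(p);
	else
		*default_acl = p;		/* directories keep the default ACL */

	return 0;

no_mem_clone:
	posix_acl_release(clone);
no_mem:
	posix_acl_release(p);
	return -ENOMEM;
}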
......@@ -52,9 +52,11 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
struct address_space *mapping = META_MAPPING(sbi);
struct page *page;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO,
.blk_addr = index,
.encrypted_page = NULL,
};
repeat:
page = grab_cache_page(mapping, index);
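Several hunks in this series switch the submit helpers from (sbi, page, fio) arguments to a single fio pointer. A sketch of the reworked struct f2fs_io_info, as it is assumed to be declared in f2fs.h (whose diff is not shown in this excerpt), makes the initializers above and below easier to read; field names are taken from those initializers:

struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	enum page_type type;		/* DATA/NODE/META/META_FLUSH */
	int rw;				/* READ/WRITE with REQ_META/REQ_PRIO etc. */
	block_t blk_addr;		/* block address to be read/written */
	struct page *page;		/* page to be submitted */
	struct page *encrypted_page;	/* ciphertext bounce page, or NULL */
};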
......@@ -65,7 +67,9 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
if (PageUptodate(page))
goto out;
if (f2fs_submit_page_bio(sbi, page, &fio))
fio.page = page;
if (f2fs_submit_page_bio(&fio))
goto repeat;
lock_page(page);
......@@ -77,8 +81,7 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
return page;
}
static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
switch (type) {
case META_NAT:
......@@ -118,8 +121,10 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO
.rw = READ_SYNC | REQ_META | REQ_PRIO,
.encrypted_page = NULL,
};
for (; nrpages-- > 0; blkno++) {
......@@ -161,7 +166,8 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
continue;
}
f2fs_submit_page_mbio(sbi, page, &fio);
fio.page = page;
f2fs_submit_page_mbio(&fio);
f2fs_put_page(page, 0);
}
out:
......@@ -510,7 +516,12 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
grab_meta_page(sbi, start_blk + index);
index = 1;
spin_lock(&im->ino_lock);
/*
* we don't need to do spin_lock(&im->ino_lock) here, since all the
* orphan inode operations are covered under f2fs_lock_op().
* And, spin_lock should be avoided due to page operations below.
*/
head = &im->ino_list;
/* loop for each orphan inode entry and write them in journal block */
......@@ -550,8 +561,6 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
set_page_dirty(page);
f2fs_put_page(page, 1);
}
spin_unlock(&im->ino_lock);
}
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
......@@ -879,10 +888,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
block_t start_blk;
struct page *cp_page;
unsigned int data_sum_blocks, orphan_blocks;
__u32 crc32 = 0;
void *kaddr;
int i;
int cp_payload_blks = __cp_payload(sbi);
......@@ -979,19 +986,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk = __start_cp_addr(sbi);
/* write out checkpoint buffer at block 0 */
cp_page = grab_meta_page(sbi, start_blk++);
kaddr = page_address(cp_page);
memcpy(kaddr, ckpt, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
for (i = 1; i < 1 + cp_payload_blks; i++) {
cp_page = grab_meta_page(sbi, start_blk++);
kaddr = page_address(cp_page);
memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
}
update_meta_page(sbi, ckpt, start_blk++);
for (i = 1; i < 1 + cp_payload_blks; i++)
update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
start_blk++);
if (orphan_num) {
write_orphan_inodes(sbi, start_blk);
......@@ -1006,11 +1005,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
/* writeout checkpoint block */
cp_page = grab_meta_page(sbi, start_blk);
kaddr = page_address(cp_page);
memcpy(kaddr, ckpt, F2FS_BLKSIZE);
set_page_dirty(cp_page);
f2fs_put_page(cp_page, 1);
update_meta_page(sbi, ckpt, start_blk);
/* wait for previous submitted node/meta pages writeback */
wait_on_all_pages_writeback(sbi);
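The repeated grab_meta_page()/memcpy()/set_page_dirty()/f2fs_put_page() sequence is folded into update_meta_page(). Its definition is not part of this excerpt; a plausible sketch, inferred from the removed lines above, is:

static void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	memcpy(dst, src, F2FS_BLKSIZE);	/* copy one checkpoint block */
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}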
......@@ -1036,7 +1031,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (unlikely(f2fs_cp_error(sbi)))
return;
clear_prefree_segments(sbi);
clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
}
......@@ -1051,7 +1046,8 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC))
(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
goto out;
if (unlikely(f2fs_cp_error(sbi)))
goto out;
......
/*
* linux/fs/f2fs/crypto_key.c
*
* Copied from linux/fs/ext4/crypto_key.c
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption key functions for f2fs
*
* Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
*/
#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <uapi/linux/keyctl.h>
#include <crypto/hash.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "xattr.h"
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
struct f2fs_completion_result *ecr = req->data;
if (rc == -EINPROGRESS)
return;
ecr->res = rc;
complete(&ecr->completion);
}
/**
* f2fs_derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
* @source_key: Source key to which to apply derivation.
* @derived_key: Derived key.
*
* Return: Zero on success; non-zero otherwise.
*/
static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE],
char source_key[F2FS_AES_256_XTS_KEY_SIZE],
char derived_key[F2FS_AES_256_XTS_KEY_SIZE])
{
int res = 0;
struct ablkcipher_request *req = NULL;
DECLARE_F2FS_COMPLETION_RESULT(ecr);
struct scatterlist src_sg, dst_sg;
struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
0);
if (IS_ERR(tfm)) {
res = PTR_ERR(tfm);
tfm = NULL;
goto out;
}
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
res = -ENOMEM;
goto out;
}
ablkcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
derive_crypt_complete, &ecr);
res = crypto_ablkcipher_setkey(tfm, deriving_key,
F2FS_AES_128_ECB_KEY_SIZE);
if (res < 0)
goto out;
sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE);
sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE);
ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
F2FS_AES_256_XTS_KEY_SIZE, NULL);
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
out:
if (req)
ablkcipher_request_free(req);
if (tfm)
crypto_free_ablkcipher(tfm);
return res;
}
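A short usage note for the derivation above, mirroring the call made in _f2fs_get_encryption_info() later in this file: the 16-byte per-inode nonce from the on-disk context serves as the AES-128-ECB key, and the 64-byte master key from the keyring is the plaintext being encrypted, so the result is the per-inode key. Sketch:

	char raw_key[F2FS_MAX_KEY_SIZE];
	int res;

	/* nonce = ECB key, master key = data, raw_key = derived XTS key */
	res = f2fs_derive_key_aes(ctx.nonce, master_key->raw, raw_key);
	if (res)
		return res;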
static void f2fs_free_crypt_info(struct f2fs_crypt_info *ci)
{
if (!ci)
return;
if (ci->ci_keyring_key)
key_put(ci->ci_keyring_key);
crypto_free_ablkcipher(ci->ci_ctfm);
kmem_cache_free(f2fs_crypt_info_cachep, ci);
}
void f2fs_free_encryption_info(struct inode *inode, struct f2fs_crypt_info *ci)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_crypt_info *prev;
if (ci == NULL)
ci = ACCESS_ONCE(fi->i_crypt_info);
if (ci == NULL)
return;
prev = cmpxchg(&fi->i_crypt_info, ci, NULL);
if (prev != ci)
return;
f2fs_free_crypt_info(ci);
}
int _f2fs_get_encryption_info(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_crypt_info *crypt_info;
char full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
(F2FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
struct key *keyring_key = NULL;
struct f2fs_encryption_key *master_key;
struct f2fs_encryption_context ctx;
struct user_key_payload *ukp;
struct crypto_ablkcipher *ctfm;
const char *cipher_str;
char raw_key[F2FS_MAX_KEY_SIZE];
char mode;
int res;
res = f2fs_crypto_initialize();
if (res)
return res;
retry:
crypt_info = ACCESS_ONCE(fi->i_crypt_info);
if (crypt_info) {
if (!crypt_info->ci_keyring_key ||
key_validate(crypt_info->ci_keyring_key) == 0)
return 0;
f2fs_free_encryption_info(inode, crypt_info);
goto retry;
}
res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
&ctx, sizeof(ctx), NULL);
if (res < 0)
return res;
else if (res != sizeof(ctx))
return -EINVAL;
res = 0;
crypt_info = kmem_cache_alloc(f2fs_crypt_info_cachep, GFP_NOFS);
if (!crypt_info)
return -ENOMEM;
crypt_info->ci_flags = ctx.flags;
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
crypt_info->ci_ctfm = NULL;
crypt_info->ci_keyring_key = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
if (S_ISREG(inode->i_mode))
mode = crypt_info->ci_data_mode;
else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
mode = crypt_info->ci_filename_mode;
else
BUG();
switch (mode) {
case F2FS_ENCRYPTION_MODE_AES_256_XTS:
cipher_str = "xts(aes)";
break;
case F2FS_ENCRYPTION_MODE_AES_256_CTS:
cipher_str = "cts(cbc(aes))";
break;
default:
printk_once(KERN_WARNING
"f2fs: unsupported key mode %d (ino %u)\n",
mode, (unsigned) inode->i_ino);
res = -ENOKEY;
goto out;
}
memcpy(full_key_descriptor, F2FS_KEY_DESC_PREFIX,
F2FS_KEY_DESC_PREFIX_SIZE);
sprintf(full_key_descriptor + F2FS_KEY_DESC_PREFIX_SIZE,
"%*phN", F2FS_KEY_DESCRIPTOR_SIZE,
ctx.master_key_descriptor);
full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
(2 * F2FS_KEY_DESCRIPTOR_SIZE)] = '\0';
keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
if (IS_ERR(keyring_key)) {
res = PTR_ERR(keyring_key);
keyring_key = NULL;
goto out;
}
crypt_info->ci_keyring_key = keyring_key;
BUG_ON(keyring_key->type != &key_type_logon);
ukp = ((struct user_key_payload *)keyring_key->payload.data);
if (ukp->datalen != sizeof(struct f2fs_encryption_key)) {
res = -EINVAL;
goto out;
}
master_key = (struct f2fs_encryption_key *)ukp->data;
BUILD_BUG_ON(F2FS_AES_128_ECB_KEY_SIZE !=
F2FS_KEY_DERIVATION_NONCE_SIZE);
BUG_ON(master_key->size != F2FS_AES_256_XTS_KEY_SIZE);
res = f2fs_derive_key_aes(ctx.nonce, master_key->raw,
raw_key);
if (res)
goto out;
ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
if (!ctfm || IS_ERR(ctfm)) {
res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
printk(KERN_DEBUG
"%s: error %d (inode %u) allocating crypto tfm\n",
__func__, res, (unsigned) inode->i_ino);
goto out;
}
crypt_info->ci_ctfm = ctfm;
crypto_ablkcipher_clear_flags(ctfm, ~0);
crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
CRYPTO_TFM_REQ_WEAK_KEY);
res = crypto_ablkcipher_setkey(ctfm, raw_key,
f2fs_encryption_key_size(mode));
if (res)
goto out;
memzero_explicit(raw_key, sizeof(raw_key));
if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) != NULL) {
f2fs_free_crypt_info(crypt_info);
goto retry;
}
return 0;
out:
if (res == -ENOKEY && !S_ISREG(inode->i_mode))
res = 0;
f2fs_free_crypt_info(crypt_info);
memzero_explicit(raw_key, sizeof(raw_key));
return res;
}
int f2fs_has_encryption_key(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
return (fi->i_crypt_info != NULL);
}
/*
* copied from linux/fs/ext4/crypto_policy.c
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility.
*
* This contains encryption policy functions for f2fs with some modifications
* to support f2fs-specific xattr APIs.
*
* Written by Michael Halcrow, 2015.
* Modified by Jaegeuk Kim, 2015.
*/
#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "xattr.h"
static int f2fs_inode_has_encryption_context(struct inode *inode)
{
int res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0, NULL);
return (res > 0);
}
/*
* check whether the policy is consistent with the encryption context
* for the inode
*/
static int f2fs_is_encryption_context_consistent_with_policy(
struct inode *inode, const struct f2fs_encryption_policy *policy)
{
struct f2fs_encryption_context ctx;
int res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), NULL);
if (res != sizeof(ctx))
return 0;
return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
F2FS_KEY_DESCRIPTOR_SIZE) == 0 &&
(ctx.flags == policy->flags) &&
(ctx.contents_encryption_mode ==
policy->contents_encryption_mode) &&
(ctx.filenames_encryption_mode ==
policy->filenames_encryption_mode));
}
static int f2fs_create_encryption_context_from_policy(
struct inode *inode, const struct f2fs_encryption_policy *policy)
{
struct f2fs_encryption_context ctx;
ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
F2FS_KEY_DESCRIPTOR_SIZE);
if (!f2fs_valid_contents_enc_mode(policy->contents_encryption_mode)) {
printk(KERN_WARNING
"%s: Invalid contents encryption mode %d\n", __func__,
policy->contents_encryption_mode);
return -EINVAL;
}
if (!f2fs_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
printk(KERN_WARNING
"%s: Invalid filenames encryption mode %d\n", __func__,
policy->filenames_encryption_mode);
return -EINVAL;
}
if (policy->flags & ~F2FS_POLICY_FLAGS_VALID)
return -EINVAL;
ctx.contents_encryption_mode = policy->contents_encryption_mode;
ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
ctx.flags = policy->flags;
BUILD_BUG_ON(sizeof(ctx.nonce) != F2FS_KEY_DERIVATION_NONCE_SIZE);
get_random_bytes(ctx.nonce, F2FS_KEY_DERIVATION_NONCE_SIZE);
return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), NULL, XATTR_CREATE);
}
int f2fs_process_policy(const struct f2fs_encryption_policy *policy,
struct inode *inode)
{
if (policy->version != 0)
return -EINVAL;
if (!S_ISDIR(inode->i_mode))
return -EINVAL;
if (!f2fs_inode_has_encryption_context(inode)) {
if (!f2fs_empty_dir(inode))
return -ENOTEMPTY;
return f2fs_create_encryption_context_from_policy(inode,
policy);
}
if (f2fs_is_encryption_context_consistent_with_policy(inode, policy))
return 0;
printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
__func__);
return -EINVAL;
}
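f2fs_process_policy() is reached from userspace through an ioctl issued on the (empty) top-level directory; this series adds F2FS_IOC_SET_ENCRYPTION_POLICY in file.c, which is not shown in this excerpt. A hedged userspace sketch follows; the ioctl name, the mode constants mirroring ext4, and the caller-supplied key_desc are assumptions, and in practice userspace tools redefine the policy struct because f2fs_crypto.h is a kernel-internal header:

#include <string.h>
#include <sys/ioctl.h>

static int set_f2fs_policy(int dirfd, const char key_desc[F2FS_KEY_DESCRIPTOR_SIZE])
{
	struct f2fs_encryption_policy policy = {
		.version = 0,
		.contents_encryption_mode = F2FS_ENCRYPTION_MODE_AES_256_XTS,
		.filenames_encryption_mode = F2FS_ENCRYPTION_MODE_AES_256_CTS,
		.flags = F2FS_POLICY_FLAGS_PAD_4,
	};

	memcpy(policy.master_key_descriptor, key_desc, F2FS_KEY_DESCRIPTOR_SIZE);

	/* kernel side lands in f2fs_process_policy() above */
	return ioctl(dirfd, F2FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}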
int f2fs_get_policy(struct inode *inode, struct f2fs_encryption_policy *policy)
{
struct f2fs_encryption_context ctx;
int res;
if (!f2fs_encrypted_inode(inode))
return -ENODATA;
res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
&ctx, sizeof(ctx), NULL);
if (res != sizeof(ctx))
return -ENODATA;
if (ctx.format != F2FS_ENCRYPTION_CONTEXT_FORMAT_V1)
return -EINVAL;
policy->version = 0;
policy->contents_encryption_mode = ctx.contents_encryption_mode;
policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
policy->flags = ctx.flags;
memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
F2FS_KEY_DESCRIPTOR_SIZE);
return 0;
}
int f2fs_is_child_context_consistent_with_parent(struct inode *parent,
struct inode *child)
{
struct f2fs_crypt_info *parent_ci, *child_ci;
int res;
if ((parent == NULL) || (child == NULL)) {
pr_err("parent %p child %p\n", parent, child);
BUG_ON(1);
}
/* no restrictions if the parent directory is not encrypted */
if (!f2fs_encrypted_inode(parent))
return 1;
/* if the child directory is not encrypted, this is always a problem */
if (!f2fs_encrypted_inode(child))
return 0;
res = f2fs_get_encryption_info(parent);
if (res)
return 0;
res = f2fs_get_encryption_info(child);
if (res)
return 0;
parent_ci = F2FS_I(parent)->i_crypt_info;
child_ci = F2FS_I(child)->i_crypt_info;
if (!parent_ci && !child_ci)
return 1;
if (!parent_ci || !child_ci)
return 0;
return (memcmp(parent_ci->ci_master_key,
child_ci->ci_master_key,
F2FS_KEY_DESCRIPTOR_SIZE) == 0 &&
(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
(parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
(parent_ci->ci_flags == child_ci->ci_flags));
}
/**
* f2fs_inherit_context() - Sets a child context from its parent
* @parent: Parent inode from which the context is inherited.
* @child: Child inode that inherits the context from @parent.
*
* Return: Zero on success, non-zero otherwise
*/
int f2fs_inherit_context(struct inode *parent, struct inode *child,
struct page *ipage)
{
struct f2fs_encryption_context ctx;
struct f2fs_crypt_info *ci;
int res;
res = f2fs_get_encryption_info(parent);
if (res < 0)
return res;
ci = F2FS_I(parent)->i_crypt_info;
BUG_ON(ci == NULL);
ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode = ci->ci_data_mode;
ctx.filenames_encryption_mode = ci->ci_filename_mode;
ctx.flags = ci->ci_flags;
memcpy(ctx.master_key_descriptor, ci->ci_master_key,
F2FS_KEY_DESCRIPTOR_SIZE);
get_random_bytes(ctx.nonce, F2FS_KEY_DERIVATION_NONCE_SIZE);
return f2fs_setxattr(child, F2FS_XATTR_INDEX_ENCRYPTION,
F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), ipage, XATTR_CREATE);
}
......@@ -94,7 +94,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
static void update_sit_info(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist;
unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
unsigned long long bimodal, dist;
unsigned int segno, vblocks;
int ndirty = 0;
......@@ -112,10 +113,10 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
ndirty++;
}
}
dist = MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec / 100;
si->bimodal = bimodal / dist;
dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
si->bimodal = div_u64(bimodal, dist);
if (si->dirty_count)
si->avg_vblocks = total_vblocks / ndirty;
si->avg_vblocks = div_u64(total_vblocks, ndirty);
else
si->avg_vblocks = 0;
}
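The hunk above widens the accounting fields to 64 bits; a plain 64-bit division would then require libgcc's __udivdi3 helper on 32-bit builds, which the kernel does not link against, hence the switch to div_u64(). A minimal illustration:

#include <linux/math64.h>

/* 64-bit dividend, 32-bit divisor: builds cleanly on 32- and 64-bit kernels */
static inline u64 avg_valid_blocks(u64 total_vblocks, u32 ndirty)
{
	return ndirty ? div_u64(total_vblocks, ndirty) : 0;
}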
......@@ -143,7 +144,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
......
/*
* linux/fs/f2fs/f2fs_crypto.h
*
* Copied from linux/fs/ext4/ext4_crypto.h
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption header content for f2fs
*
* Written by Michael Halcrow, 2015.
* Modified by Jaegeuk Kim, 2015.
*/
#ifndef _F2FS_CRYPTO_H
#define _F2FS_CRYPTO_H
#include <linux/fs.h>
#define F2FS_KEY_DESCRIPTOR_SIZE 8
/* Policy provided via an ioctl on the topmost directory */
struct f2fs_encryption_policy {
char version;
char contents_encryption_mode;
char filenames_encryption_mode;
char flags;
char master_key_descriptor[F2FS_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));
#define F2FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
#define F2FS_KEY_DERIVATION_NONCE_SIZE 16
#define F2FS_POLICY_FLAGS_PAD_4 0x00
#define F2FS_POLICY_FLAGS_PAD_8 0x01
#define F2FS_POLICY_FLAGS_PAD_16 0x02
#define F2FS_POLICY_FLAGS_PAD_32 0x03
#define F2FS_POLICY_FLAGS_PAD_MASK 0x03
#define F2FS_POLICY_FLAGS_VALID 0x03
/**
* Encryption context for inode
*
* Protector format:
* 1 byte: Protector format (1 = this version)
* 1 byte: File contents encryption mode
* 1 byte: File names encryption mode
* 1 byte: Flags
* 8 bytes: Master Key descriptor
* 16 bytes: Encryption Key derivation nonce
*/
struct f2fs_encryption_context {
char format;
char contents_encryption_mode;
char filenames_encryption_mode;
char flags;
char master_key_descriptor[F2FS_KEY_DESCRIPTOR_SIZE];
char nonce[F2FS_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((__packed__));
/* Encryption parameters */
#define F2FS_XTS_TWEAK_SIZE 16
#define F2FS_AES_128_ECB_KEY_SIZE 16
#define F2FS_AES_256_GCM_KEY_SIZE 32
#define F2FS_AES_256_CBC_KEY_SIZE 32
#define F2FS_AES_256_CTS_KEY_SIZE 32
#define F2FS_AES_256_XTS_KEY_SIZE 64
#define F2FS_MAX_KEY_SIZE 64
#define F2FS_KEY_DESC_PREFIX "f2fs:"
#define F2FS_KEY_DESC_PREFIX_SIZE 5
struct f2fs_encryption_key {
__u32 mode;
char raw[F2FS_MAX_KEY_SIZE];
__u32 size;
} __attribute__((__packed__));
struct f2fs_crypt_info {
char ci_data_mode;
char ci_filename_mode;
char ci_flags;
struct crypto_ablkcipher *ci_ctfm;
struct key *ci_keyring_key;
char ci_master_key[F2FS_KEY_DESCRIPTOR_SIZE];
};
#define F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define F2FS_WRITE_PATH_FL 0x00000002
struct f2fs_crypto_ctx {
union {
struct {
struct page *bounce_page; /* Ciphertext page */
struct page *control_page; /* Original page */
} w;
struct {
struct bio *bio;
struct work_struct work;
} r;
struct list_head free_list; /* Free list */
};
char flags; /* Flags */
};
struct f2fs_completion_result {
struct completion completion;
int res;
};
#define DECLARE_F2FS_COMPLETION_RESULT(ecr) \
struct f2fs_completion_result ecr = { \
COMPLETION_INITIALIZER((ecr).completion), 0 }
static inline int f2fs_encryption_key_size(int mode)
{
switch (mode) {
case F2FS_ENCRYPTION_MODE_AES_256_XTS:
return F2FS_AES_256_XTS_KEY_SIZE;
case F2FS_ENCRYPTION_MODE_AES_256_GCM:
return F2FS_AES_256_GCM_KEY_SIZE;
case F2FS_ENCRYPTION_MODE_AES_256_CBC:
return F2FS_AES_256_CBC_KEY_SIZE;
case F2FS_ENCRYPTION_MODE_AES_256_CTS:
return F2FS_AES_256_CTS_KEY_SIZE;
default:
BUG();
}
return 0;
}
#define F2FS_FNAME_NUM_SCATTER_ENTRIES 4
#define F2FS_CRYPTO_BLOCK_SIZE 16
#define F2FS_FNAME_CRYPTO_DIGEST_SIZE 32
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
*/
struct f2fs_encrypted_symlink_data {
__le16 len;
char encrypted_path[1];
} __attribute__((__packed__));
/**
* This function is used to calculate the disk space required to
* store a filename of length l in encrypted symlink format.
*/
static inline u32 encrypted_symlink_data_len(u32 l)
{
return (l + sizeof(struct f2fs_encrypted_symlink_data) - 1);
}
#endif /* _F2FS_CRYPTO_H */
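As a worked example of encrypted_symlink_data_len() above: sizeof(struct f2fs_encrypted_symlink_data) is 3 bytes when packed (the 2-byte length plus the 1-byte placeholder array), so a 32-byte encrypted name occupies encrypted_symlink_data_len(32) = 34 bytes on disk, i.e. the little-endian length followed by the 32 ciphertext bytes.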
......@@ -518,12 +518,79 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
.rw = WRITE_SYNC,
.rw = READ_SYNC,
.encrypted_page = NULL,
};
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
struct page *page;
int err;
/* do not read out */
page = grab_cache_page(inode->i_mapping, bidx);
if (!page)
return;
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
goto out;
if (unlikely(dn.data_blkaddr == NULL_ADDR))
goto put_out;
get_node_info(fio.sbi, dn.nid, &ni);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
fio.page = page;
fio.blk_addr = dn.data_blkaddr;
fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
if (!fio.encrypted_page)
goto put_out;
f2fs_submit_page_bio(&fio);
/* allocate block address */
f2fs_wait_on_page_writeback(dn.node_page, NODE);
allocate_data_block(fio.sbi, NULL, fio.blk_addr,
&fio.blk_addr, &sum, CURSEG_COLD_DATA);
dn.data_blkaddr = fio.blk_addr;
/* write page */
lock_page(fio.encrypted_page);
set_page_writeback(fio.encrypted_page);
fio.rw = WRITE_SYNC;
f2fs_submit_page_mbio(&fio);
set_data_blkaddr(&dn);
f2fs_update_extent_cache(&dn);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
if (page->index == 0)
set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
f2fs_put_page(fio.encrypted_page, 1);
put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
}
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
struct page *page;
page = get_lock_data_page(inode, bidx);
if (IS_ERR(page))
return;
if (gc_type == BG_GC) {
if (PageWriteback(page))
......@@ -531,12 +598,19 @@ static void move_data_page(struct inode *inode, struct page *page, int gc_type)
set_page_dirty(page);
set_cold_data(page);
} else {
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
.rw = WRITE_SYNC,
.page = page,
.encrypted_page = NULL,
};
f2fs_wait_on_page_writeback(page, DATA);
if (clear_page_dirty_for_io(page))
inode_dec_dirty_pages(inode);
set_cold_data(page);
do_write_data_page(page, &fio);
do_write_data_page(&fio);
clear_cold_data(page);
}
out:
......@@ -599,10 +673,16 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
/* if encrypted inode, let's go phase 3 */
if (f2fs_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
add_gc_inode(gc_list, inode);
continue;
}
data_page = find_data_page(inode,
start_bidx + ofs_in_node, false);
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
data_page = get_read_data_page(inode,
start_bidx + ofs_in_node, READA);
if (IS_ERR(data_page)) {
iput(inode);
continue;
......@@ -616,12 +696,12 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
/* phase 3 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
data_page = get_lock_data_page(inode,
start_bidx + ofs_in_node);
if (IS_ERR(data_page))
continue;
move_data_page(inode, data_page, gc_type);
start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+ ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
move_encrypted_block(inode, start_bidx);
else
move_data_page(inode, start_bidx, gc_type);
stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
......@@ -670,6 +750,15 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
sum = page_address(sum_page);
/*
* this is to avoid deadlock:
* - lock_page(sum_page) - f2fs_replace_block
* - check_valid_map() - mutex_lock(sentry_lock)
* - mutex_lock(sentry_lock) - change_curseg()
* - lock_page(sum_page)
*/
unlock_page(sum_page);
switch (GET_SUM_TYPE((&sum->footer))) {
case SUM_TYPE_NODE:
gc_node_segment(sbi, sum->entries, segno, gc_type);
......@@ -683,7 +772,7 @@ static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
stat_inc_call_count(sbi->stat_info);
f2fs_put_page(sum_page, 1);
f2fs_put_page(sum_page, 0);
}
int f2fs_gc(struct f2fs_sb_info *sbi)
......
......@@ -79,8 +79,7 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
const unsigned char *name = name_info->name;
size_t len = name_info->len;
if ((len <= 2) && (name[0] == '.') &&
(name[1] == '.' || name[1] == '\0'))
if (is_dot_dotdot(name_info))
return 0;
/* Initialize the default seed for the hash checksum functions */
......
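The open-coded "." / ".." check is replaced by is_dot_dotdot(). The helper lives in f2fs.h, whose diff is not part of this excerpt; a sketch consistent with the condition being removed would be:

static inline bool is_dot_dotdot(const struct qstr *str)
{
	if (str->len == 1 && str->name[0] == '.')
		return true;

	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
		return true;

	return false;
}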
......@@ -13,7 +13,7 @@
#include "f2fs.h"
bool f2fs_may_inline(struct inode *inode)
bool f2fs_may_inline_data(struct inode *inode)
{
if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
return false;
......@@ -27,6 +27,20 @@ bool f2fs_may_inline(struct inode *inode)
if (i_size_read(inode) > MAX_INLINE_DATA)
return false;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return false;
return true;
}
bool f2fs_may_inline_dentry(struct inode *inode)
{
if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
return false;
if (!S_ISDIR(inode->i_mode))
return false;
return true;
}
......@@ -95,8 +109,11 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
void *src_addr, *dst_addr;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.type = DATA,
.rw = WRITE_SYNC | REQ_PRIO,
.page = page,
.encrypted_page = NULL,
};
int dirty, err;
......@@ -130,7 +147,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
/* write data page to try to make data consistent */
set_page_writeback(page);
fio.blk_addr = dn->data_blkaddr;
write_data_page(page, dn, &fio);
write_data_page(dn, &fio);
set_data_blkaddr(dn);
f2fs_update_extent_cache(dn);
f2fs_wait_on_page_writeback(page, DATA);
......@@ -267,23 +284,26 @@ bool recover_inline_data(struct inode *inode, struct page *npage)
}
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
struct qstr *name, struct page **res_page)
struct f2fs_filename *fname, struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_inline_dentry *inline_dentry;
struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
struct page *ipage;
f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage))
return NULL;
inline_dentry = inline_data_addr(ipage);
namehash = f2fs_dentry_hash(&name);
make_dentry_ptr(&d, (void *)inline_dentry, 2);
de = find_target_dentry(name, NULL, &d);
inline_dentry = inline_data_addr(ipage);
make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
de = find_target_dentry(fname, namehash, NULL, &d);
unlock_page(ipage);
if (de)
*res_page = ipage;
......@@ -325,7 +345,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
dentry_blk = inline_data_addr(ipage);
make_dentry_ptr(&d, (void *)dentry_blk, 2);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
do_make_empty_dir(inode, parent, &d);
set_page_dirty(ipage);
......@@ -429,7 +449,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
f2fs_wait_on_page_writeback(ipage, NODE);
name_hash = f2fs_dentry_hash(name);
make_dentry_ptr(&d, (void *)dentry_blk, 2);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
set_page_dirty(ipage);
......@@ -506,7 +526,8 @@ bool f2fs_empty_inline_dir(struct inode *dir)
return true;
}
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
struct f2fs_str *fstr)
{
struct inode *inode = file_inode(file);
struct f2fs_inline_dentry *inline_dentry = NULL;
......@@ -522,9 +543,9 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
inline_dentry = inline_data_addr(ipage);
make_dentry_ptr(&d, (void *)inline_dentry, 2);
make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
if (!f2fs_fill_dentries(ctx, &d, 0))
if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
ctx->pos = NR_INLINE_DENTRY;
f2fs_put_page(ipage, 1);
......
......@@ -198,6 +198,9 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
} else if (S_ISLNK(inode->i_mode)) {
if (f2fs_encrypted_inode(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
......@@ -359,6 +362,10 @@ void f2fs_evict_inode(struct inode *inode)
if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
if (F2FS_I(inode)->i_crypt_info)
f2fs_free_encryption_info(inode, F2FS_I(inode)->i_crypt_info);
#endif
clear_inode(inode);
}
......
......@@ -195,32 +195,35 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
start, nr);
}
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
bool is_cp = true;
bool need = false;
down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
need = true;
}
up_read(&nm_i->nat_tree_lock);
return is_cp;
return need;
}
bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
bool fsynced = false;
bool is_cp = true;
down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ino);
if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
fsynced = true;
e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
up_read(&nm_i->nat_tree_lock);
return fsynced;
return is_cp;
}
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
......@@ -312,6 +315,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
__set_nat_cache_dirty(nm_i, e);
/* update fsync_mark if its inode nat entry is still alive */
if (ni->nid != ni->ino)
e = __lookup_nat_cache(nm_i, ni->ino);
if (e) {
if (fsync_done && ni->nid == ni->ino)
......@@ -995,8 +999,11 @@ static int read_node_page(struct page *page, int rw)
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
.rw = rw,
.page = page,
.encrypted_page = NULL,
};
get_node_info(sbi, page->index, &ni);
......@@ -1011,7 +1018,7 @@ static int read_node_page(struct page *page, int rw)
return LOCKED_PAGE;
fio.blk_addr = ni.blk_addr;
return f2fs_submit_page_bio(sbi, page, &fio);
return f2fs_submit_page_bio(&fio);
}
/*
......@@ -1204,13 +1211,9 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
/* called by fsync() */
if (ino && IS_DNODE(page)) {
set_fsync_mark(page, 1);
if (IS_INODE(page)) {
if (!is_checkpointed_node(sbi, ino) &&
!has_fsynced_inode(sbi, ino))
set_dentry_mark(page, 1);
else
set_dentry_mark(page, 0);
}
if (IS_INODE(page))
set_dentry_mark(page,
need_dentry_mark(sbi, ino));
nwritten++;
} else {
set_fsync_mark(page, 0);
......@@ -1293,8 +1296,11 @@ static int f2fs_write_node_page(struct page *page,
nid_t nid;
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
.page = page,
.encrypted_page = NULL,
};
trace_f2fs_writepage(page, NODE);
......@@ -1329,7 +1335,7 @@ static int f2fs_write_node_page(struct page *page,
set_page_writeback(page);
fio.blk_addr = ni.blk_addr;
write_node_page(sbi, page, nid, &fio);
write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
......
......@@ -343,28 +343,6 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
* - Mark cold node blocks in their node footer
* - Mark cold data pages in page cache
*/
static inline int is_file(struct inode *inode, int type)
{
return F2FS_I(inode)->i_advise & type;
}
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
}
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT)
static inline int is_cold_data(struct page *page)
{
return PageChecked(page);
......
......@@ -83,6 +83,11 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
goto out;
}
if (file_enc_name(inode)) {
iput(dir);
return 0;
}
name.len = le32_to_cpu(raw_inode->i_namelen);
name.name = raw_inode->i_name;
......@@ -143,6 +148,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
static void recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
i_size_write(inode, le64_to_cpu(raw->i_size));
......@@ -153,8 +159,13 @@ static void recover_inode(struct inode *inode, struct page *page)
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
if (file_enc_name(inode))
name = "<encrypted>";
else
name = F2FS_INODE(page)->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
ino_of_node(page), F2FS_INODE(page)->i_name);
ino_of_node(page), name);
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
......@@ -174,7 +185,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
while (1) {
struct fsync_inode_entry *entry;
if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = get_meta_page(sbi, blkaddr);
......@@ -349,7 +360,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int start, end;
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
int err = 0, recovered = 0;
......@@ -396,7 +406,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
dest = datablock_addr(page, dn.ofs_in_node);
if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
dest >= MAIN_BLKADDR(sbi) && dest < MAX_BLKADDR(sbi)) {
is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
......@@ -409,13 +419,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (err)
goto err;
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* write dummy data page */
recover_data_page(sbi, NULL, &sum, src, dest);
dn.data_blkaddr = dest;
set_data_blkaddr(&dn);
f2fs_update_extent_cache(&dn);
f2fs_replace_block(sbi, &dn, src, dest,
ni.version, false);
recovered++;
}
dn.ofs_in_node++;
......@@ -454,7 +460,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
while (1) {
struct fsync_inode_entry *entry;
if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
ra_meta_pages_cond(sbi, blkaddr);
......
......@@ -163,6 +163,7 @@ struct seg_entry {
*/
unsigned short ckpt_valid_blocks;
unsigned char *ckpt_valid_map;
unsigned char *discard_map;
unsigned char type; /* segment type like CURSEG_XXX_TYPE */
unsigned long long mtime; /* modification time of the segment */
};
......
......@@ -80,7 +80,7 @@ void f2fs_trace_pid(struct page *page)
radix_tree_preload_end();
}
void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush)
void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
{
struct inode *inode;
pid_t pid;
......@@ -91,8 +91,8 @@ void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush)
return;
}
inode = page->mapping->host;
pid = page_private(page);
inode = fio->page->mapping->host;
pid = page_private(fio->page);
major = MAJOR(inode->i_sb->s_dev);
minor = MINOR(inode->i_sb->s_dev);
......
......@@ -33,12 +33,12 @@ struct last_io_info {
};
extern void f2fs_trace_pid(struct page *);
extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int);
extern void f2fs_trace_ios(struct f2fs_io_info *, int);
extern void f2fs_build_trace_ios(void);
extern void f2fs_destroy_trace_ios(void);
#else
#define f2fs_trace_pid(p)
#define f2fs_trace_ios(p, i, n)
#define f2fs_trace_ios(i, n)
#define f2fs_build_trace_ios()
#define f2fs_destroy_trace_ios()
......
......@@ -584,6 +584,9 @@ static int __f2fs_setxattr(struct inode *inode, int index,
inode->i_ctime = CURRENT_TIME;
clear_inode_flag(fi, FI_ACL_MODE);
}
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
if (ipage)
update_inode(inode, ipage);
......
......@@ -35,6 +35,10 @@
#define F2FS_XATTR_INDEX_LUSTRE 5
#define F2FS_XATTR_INDEX_SECURITY 6
#define F2FS_XATTR_INDEX_ADVISE 7
/* Should be same as EXT4_XATTR_INDEX_ENCRYPTION */
#define F2FS_XATTR_INDEX_ENCRYPTION 9
#define F2FS_XATTR_NAME_ENCRYPTION_CONTEXT "c"
struct f2fs_xattr_header {
__le32 h_magic; /* magic number for identification */
......
......@@ -50,6 +50,8 @@
#define MAX_ACTIVE_NODE_LOGS 8
#define MAX_ACTIVE_DATA_LOGS 8
#define VERSION_LEN 256
/*
* For superblock
*/
......@@ -86,6 +88,12 @@ struct f2fs_super_block {
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
__u8 reserved[871]; /* valid reserved region */
} __packed;
/*
......
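The new 'feature' word in the superblock is a little-endian bitmask. The flag definitions live in f2fs.h (not shown in this excerpt); a sketch of how encryption support is assumed to be advertised by mkfs and checked by the kernel:

#define F2FS_FEATURE_ENCRYPT	0x0001

#define F2FS_HAS_FEATURE(sb, mask)					\
	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)

/* callers would then gate the encryption ioctls on
 * F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT) */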
......@@ -13,6 +13,10 @@ TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
TRACE_DEFINE_ENUM(INMEM);
TRACE_DEFINE_ENUM(INMEM_DROP);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
TRACE_DEFINE_ENUM(CURSEG_WARM_DATA);
TRACE_DEFINE_ENUM(CURSEG_COLD_DATA);
......@@ -37,6 +41,7 @@ TRACE_DEFINE_ENUM(__REQ_META);
TRACE_DEFINE_ENUM(CP_UMOUNT);
TRACE_DEFINE_ENUM(CP_FASTBOOT);
TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
#define show_block_type(type) \
......@@ -112,6 +117,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ CP_DISCARD, "Discard" })
struct victim_sel_policy;
struct f2fs_map_blocks;
DECLARE_EVENT_CLASS(f2fs__inode,
......@@ -476,36 +482,35 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err)
);
TRACE_EVENT(f2fs_get_data_block,
TP_PROTO(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int ret),
TRACE_EVENT(f2fs_map_blocks,
TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
TP_ARGS(inode, iblock, bh, ret),
TP_ARGS(inode, map, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(sector_t, iblock)
__field(sector_t, bh_start)
__field(size_t, bh_size)
__field(block_t, m_lblk)
__field(block_t, m_pblk)
__field(unsigned int, m_len)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->iblock = iblock;
__entry->bh_start = bh->b_blocknr;
__entry->bh_size = bh->b_size;
__entry->m_lblk = map->m_lblk;
__entry->m_pblk = map->m_pblk;
__entry->m_len = map->m_len;
__entry->ret = ret;
),
TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
"start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
"start blkaddr = 0x%llx, len = 0x%llx, err = %d",
show_dev_ino(__entry),
(unsigned long long)__entry->iblock,
(unsigned long long)__entry->bh_start,
(unsigned long long)__entry->bh_size,
(unsigned long long)__entry->m_lblk,
(unsigned long long)__entry->m_pblk,
(unsigned long long)__entry->m_len,
__entry->ret)
);
......