Commit 2eee010d authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Lots of bug fixes and cleanups"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (40 commits)
  ext4: remove unused variable
  ext4: use journal inode to determine journal overhead
  ext4: create function to read journal inode
  ext4: unmap metadata when zeroing blocks
  ext4: remove plugging from ext4_file_write_iter()
  ext4: allow unlocked direct IO when pages are cached
  ext4: require encryption feature for EXT4_IOC_SET_ENCRYPTION_POLICY
  fscrypto: use standard macros to compute length of fname ciphertext
  ext4: do not unnecessarily null-terminate encrypted symlink data
  ext4: release bh in make_indexed_dir
  ext4: Allow parallel DIO reads
  ext4: allow DAX writeback for hole punch
  jbd2: fix lockdep annotation in add_transaction_credits()
  blockgroup_lock.h: simplify definition of NR_BG_LOCKS
  blockgroup_lock.h: remove debris from bgl_lock_ptr() conversion
  fscrypto: make filename crypto functions return 0 on success
  fscrypto: rename completion callbacks to reflect usage
  fscrypto: remove unnecessary includes
  fscrypto: improved validation when loading inode encryption metadata
  ext4: fix memory leak when symlink decryption fails
  ...
parents 513a4bef 18017479
......@@ -28,7 +28,6 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
......@@ -128,11 +127,11 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
EXPORT_SYMBOL(fscrypt_get_ctx);
/**
* fscrypt_complete() - The completion callback for page encryption
* @req: The asynchronous encryption request context
* @res: The result of the encryption operation
* page_crypt_complete() - completion callback for page crypto
* @req: The asynchronous cipher request context
* @res: The result of the cipher operation
*/
static void fscrypt_complete(struct crypto_async_request *req, int res)
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
struct fscrypt_completion_result *ecr = req->data;
......@@ -170,7 +169,7 @@ static int do_page_crypto(struct inode *inode,
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
fscrypt_complete, &ecr);
page_crypt_complete, &ecr);
BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
memcpy(xts_tweak, &index, sizeof(index));
......
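The rename above is mechanical; the wait-for-completion pattern these callbacks implement is unchanged. A minimal sketch of that pattern, condensed from fs/crypto (the DECLARE_FS_COMPLETION_RESULT declarator is assumed from fscrypto.h, not shown in this hunk):

static void page_crypt_complete(struct crypto_async_request *req, int res)
{
        struct fscrypt_completion_result *ecr = req->data;

        if (res == -EINPROGRESS)        /* backlogged; the real completion comes later */
                return;
        ecr->res = res;
        complete(&ecr->completion);
}

/* Submitter side: block until the async cipher request finishes. */
DECLARE_FS_COMPLETION_RESULT(ecr);

res = crypto_skcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
        wait_for_completion(&ecr.completion);
        res = ecr.res;
}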
......@@ -10,21 +10,16 @@
* This has not yet undergone a rigorous security audit.
*/
#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/fscrypto.h>
static u32 size_round_up(size_t size, size_t blksize)
{
return ((size + blksize - 1) / blksize) * blksize;
}
/**
* dir_crypt_complete() -
* fname_crypt_complete() - completion callback for filename crypto
* @req: The asynchronous cipher request context
* @res: The result of the cipher operation
*/
static void dir_crypt_complete(struct crypto_async_request *req, int res)
static void fname_crypt_complete(struct crypto_async_request *req, int res)
{
struct fscrypt_completion_result *ecr = req->data;
......@@ -35,11 +30,11 @@ static void dir_crypt_complete(struct crypto_async_request *req, int res)
}
/**
* fname_encrypt() -
* fname_encrypt() - encrypt a filename
*
* This function encrypts the input filename, and returns the length of the
* ciphertext. Errors are returned as negative numbers. We trust the caller to
* allocate sufficient memory to oname string.
* The caller must have allocated sufficient memory for the @oname string.
*
* Return: 0 on success, -errno on failure
*/
static int fname_encrypt(struct inode *inode,
const struct qstr *iname, struct fscrypt_str *oname)
......@@ -60,10 +55,9 @@ static int fname_encrypt(struct inode *inode,
if (iname->len <= 0 || iname->len > lim)
return -EIO;
ciphertext_len = (iname->len < FS_CRYPTO_BLOCK_SIZE) ?
FS_CRYPTO_BLOCK_SIZE : iname->len;
ciphertext_len = size_round_up(ciphertext_len, padding);
ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
ciphertext_len = round_up(ciphertext_len, padding);
ciphertext_len = min(ciphertext_len, lim);
if (ciphertext_len <= sizeof(buf)) {
workbuf = buf;
......@@ -84,7 +78,7 @@ static int fname_encrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
dir_crypt_complete, &ecr);
fname_crypt_complete, &ecr);
/* Copy the input */
memcpy(workbuf, iname->name, iname->len);
......@@ -105,20 +99,22 @@ static int fname_encrypt(struct inode *inode,
}
kfree(alloc_buf);
skcipher_request_free(req);
if (res < 0)
if (res < 0) {
printk_ratelimited(KERN_ERR
"%s: Error (error code %d)\n", __func__, res);
return res;
}
oname->len = ciphertext_len;
return res;
return 0;
}
/*
* fname_decrypt()
* This function decrypts the input filename, and returns
* the length of the plaintext.
* Errors are returned as negative numbers.
* We trust the caller to allocate sufficient memory to oname string.
/**
* fname_decrypt() - decrypt a filename
*
* The caller must have allocated sufficient memory for the @oname string.
*
* Return: 0 on success, -errno on failure
*/
static int fname_decrypt(struct inode *inode,
const struct fscrypt_str *iname,
......@@ -146,7 +142,7 @@ static int fname_decrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
dir_crypt_complete, &ecr);
fname_crypt_complete, &ecr);
/* Initialize IV */
memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
......@@ -168,7 +164,7 @@ static int fname_decrypt(struct inode *inode,
}
oname->len = strnlen(oname->name, iname->len);
return oname->len;
return 0;
}
static const char *lookup_table =
......@@ -231,9 +227,8 @@ u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
if (ci)
padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
if (ilen < FS_CRYPTO_BLOCK_SIZE)
ilen = FS_CRYPTO_BLOCK_SIZE;
return size_round_up(ilen, padding);
ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
return round_up(ilen, padding);
}
EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
......@@ -279,6 +274,10 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
/**
* fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
* space
*
* The caller must have allocated sufficient memory for the @oname string.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_disk_to_usr(struct inode *inode,
u32 hash, u32 minor_hash,
......@@ -287,13 +286,12 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
{
const struct qstr qname = FSTR_TO_QSTR(iname);
char buf[24];
int ret;
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
oname->name[iname->len - 1] = '.';
oname->len = iname->len;
return oname->len;
return 0;
}
if (iname->len < FS_CRYPTO_BLOCK_SIZE)
......@@ -303,9 +301,9 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
return fname_decrypt(inode, iname, oname);
if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
ret = digest_encode(iname->name, iname->len, oname->name);
oname->len = ret;
return ret;
oname->len = digest_encode(iname->name, iname->len,
oname->name);
return 0;
}
if (hash) {
memcpy(buf, &hash, 4);
......@@ -315,15 +313,18 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
}
memcpy(buf + 8, iname->name + iname->len - 16, 16);
oname->name[0] = '_';
ret = digest_encode(buf, 24, oname->name + 1);
oname->len = ret + 1;
return ret + 1;
oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
/**
* fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
* space
*
* The caller must have allocated sufficient memory for the @oname string.
*
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_usr_to_disk(struct inode *inode,
const struct qstr *iname,
......@@ -333,7 +334,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
oname->name[0] = '.';
oname->name[iname->len - 1] = '.';
oname->len = iname->len;
return oname->len;
return 0;
}
if (inode->i_crypt_info)
return fname_encrypt(inode, iname, oname);
......@@ -367,10 +368,10 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (dir->i_crypt_info) {
ret = fscrypt_fname_alloc_buffer(dir, iname->len,
&fname->crypto_buf);
if (ret < 0)
if (ret)
return ret;
ret = fname_encrypt(dir, iname, &fname->crypto_buf);
if (ret < 0)
if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
fname->disk_name.len = fname->crypto_buf.len;
......
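The new length computation in fname_encrypt() deserves a worked example. A hypothetical standalone helper (the function name is illustrative, not from the patch) mirroring the three lines above, with FS_CRYPTO_BLOCK_SIZE == 16 and padding one of 4/8/16/32 from the policy flags:

static u32 fname_ciphertext_len(u32 name_len, u32 padding, u32 lim)
{
        /* never shorter than one cipher block, to hide very short names */
        u32 len = max(name_len, (u32)FS_CRYPTO_BLOCK_SIZE);

        /* pad to the policy granularity ... */
        len = round_up(len, padding);

        /* ... but never past the filesystem's name-length limit */
        return min(len, lim);
}

/*
 * fname_ciphertext_len(5, 16, 255)   == 16   (padded to a full block)
 * fname_ciphertext_len(20, 32, 255)  == 32   (rounded up to padding)
 * fname_ciphertext_len(250, 32, 255) == 255  (clamped to the limit)
 */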
......@@ -8,11 +8,8 @@
* Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
*/
#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <uapi/linux/keyctl.h>
#include <linux/fscrypto.h>
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
......@@ -139,6 +136,38 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
return res;
}
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
const char **cipher_str_ret, int *keysize_ret)
{
if (S_ISREG(inode->i_mode)) {
if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
*cipher_str_ret = "xts(aes)";
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
}
pr_warn_once("fscrypto: unsupported contents encryption mode "
"%d for inode %lu\n",
ci->ci_data_mode, inode->i_ino);
return -ENOKEY;
}
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
*cipher_str_ret = "cts(cbc(aes))";
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
return 0;
}
pr_warn_once("fscrypto: unsupported filenames encryption mode "
"%d for inode %lu\n",
ci->ci_filename_mode, inode->i_ino);
return -ENOKEY;
}
pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
(inode->i_mode & S_IFMT), inode->i_ino);
return -ENOKEY;
}
static void put_crypt_info(struct fscrypt_info *ci)
{
if (!ci)
......@@ -155,8 +184,8 @@ int get_crypt_info(struct inode *inode)
struct fscrypt_context ctx;
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
u8 raw_key[FS_MAX_KEY_SIZE];
u8 mode;
int res;
res = fscrypt_initialize();
......@@ -179,13 +208,19 @@ int get_crypt_info(struct inode *inode)
if (res < 0) {
if (!fscrypt_dummy_context_enabled(inode))
return res;
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
ctx.flags = 0;
} else if (res != sizeof(ctx)) {
return -EINVAL;
}
res = 0;
if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
return -EINVAL;
if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
return -EINVAL;
crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
if (!crypt_info)
......@@ -198,27 +233,11 @@ int get_crypt_info(struct inode *inode)
crypt_info->ci_keyring_key = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
if (S_ISREG(inode->i_mode))
mode = crypt_info->ci_data_mode;
else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
mode = crypt_info->ci_filename_mode;
else
BUG();
switch (mode) {
case FS_ENCRYPTION_MODE_AES_256_XTS:
cipher_str = "xts(aes)";
break;
case FS_ENCRYPTION_MODE_AES_256_CTS:
cipher_str = "cts(cbc(aes))";
break;
default:
printk_once(KERN_WARNING
"%s: unsupported key mode %d (ino %u)\n",
__func__, mode, (unsigned) inode->i_ino);
res = -ENOKEY;
res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
if (res)
goto out;
}
if (fscrypt_dummy_context_enabled(inode)) {
memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
goto got_key;
......@@ -253,7 +272,7 @@ int get_crypt_info(struct inode *inode)
crypt_info->ci_ctfm = ctfm;
crypto_skcipher_clear_flags(ctfm, ~0);
crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode));
res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
if (res)
goto out;
......
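Condensed from the hunks above, the new flow in get_crypt_info() threads the key size from determine_cipher_type() straight into the setkey call, replacing the removed fscrypt_key_size() table; a sketch with the error paths trimmed:

const char *cipher_str;
int keysize;
int res;

res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
if (res)        /* -ENOKEY on an unsupported mode or file type */
        goto out;

ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); /* "xts(aes)" or "cts(cbc(aes))" */
if (IS_ERR(ctfm)) {
        res = PTR_ERR(ctfm);
        goto out;
}
res = crypto_skcipher_setkey(ctfm, raw_key, keysize);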
......@@ -260,11 +260,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
0, 0, &de_name, &fstr);
de_name = fstr;
fstr.len = save_len;
if (err < 0)
if (err)
goto errout;
if (!dir_emit(ctx,
fstr.name, err,
de_name.name, de_name.len,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type)))
goto done;
......@@ -627,7 +628,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
int buf_size)
{
struct ext4_dir_entry_2 *de;
int nlen, rlen;
int rlen;
unsigned int offset = 0;
char *top;
......@@ -637,7 +638,6 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
if (ext4_check_dir_entry(dir, NULL, de, bh,
buf, buf_size, offset))
return -EFSCORRUPTED;
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
......
......@@ -262,6 +262,9 @@ struct ext4_io_submit {
(s)->s_first_ino)
#endif
#define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits)))
#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
blkbits))
/* Translate a block number to a cluster number */
#define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits)
......@@ -1117,9 +1120,15 @@ struct ext4_inode_info {
#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
#define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
#define EXT4_MOUNT_QUOTA 0x40000 /* Some quota option set */
#define EXT4_MOUNT_USRQUOTA 0x80000 /* "old" user quota,
* enable enforcement for hidden
* quota files */
#define EXT4_MOUNT_GRPQUOTA 0x100000 /* "old" group quota, enable
* enforcement for hidden quota
* files */
#define EXT4_MOUNT_PRJQUOTA 0x200000 /* Enable project quota
* enforcement */
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
......@@ -1636,26 +1645,6 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
* Feature set definitions
*/
/* Use the ext4_{has,set,clear}_feature_* helpers; these will be removed */
#define EXT4_HAS_COMPAT_FEATURE(sb,mask) \
((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0)
#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask) \
((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0)
#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask) \
((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0)
#define EXT4_SET_COMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
#define EXT4_SET_INCOMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
#define EXT4_CLEAR_COMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
#define EXT4_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
#define EXT4_CLEAR_INCOMPAT_FEATURE(sb,mask) \
EXT4_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
#define EXT4_FEATURE_COMPAT_DIR_PREALLOC 0x0001
#define EXT4_FEATURE_COMPAT_IMAGIC_INODES 0x0002
#define EXT4_FEATURE_COMPAT_HAS_JOURNAL 0x0004
......
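The case called out by the comment removed from ext4_fallocate() below (blocksize 4096, offset 3072, len 2048) makes a good check of the new EXT4_MAX_BLOCKS() macro, with blkbits == 12:

/*
 * EXT4_MAX_BLOCKS(2048, 3072, 12)
 *   = (EXT4_BLOCK_ALIGN(2048 + 3072, 12) >> 12) - (3072 >> 12)
 *   = (ALIGN(5120, 4096) >> 12) - 0
 *   = 8192 >> 12
 *   = 2 blocks
 *
 * The naive len >> blkbits would give 2048 >> 12 == 0, dropping a
 * range that straddles a block boundary.
 */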
......@@ -4679,6 +4679,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
unsigned int credits;
loff_t epos;
BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
map.m_lblk = offset;
map.m_len = len;
/*
......@@ -4693,13 +4694,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, len);
/*
* We can only call ext_depth() on extent based inodes
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
depth = ext_depth(inode);
else
depth = -1;
retry:
while (ret >= 0 && len) {
......@@ -4966,13 +4961,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
trace_ext4_fallocate_enter(inode, offset, len, mode);
lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- lblk;
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
......@@ -5035,12 +5025,8 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
map.m_lblk);
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
/*
* This is somewhat ugly but the idea is clear: When transaction is
* reserved, everything goes into it. Otherwise we rather start several
......@@ -5734,6 +5720,9 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
} else {
ext4_ext_drop_refs(path);
kfree(path);
}
ret = ext4_es_remove_extent(inode, offset_lblk,
......
......@@ -91,9 +91,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(iocb->ki_filp);
struct blk_plug plug;
int o_direct = iocb->ki_flags & IOCB_DIRECT;
int unaligned_aio = 0;
int overwrite = 0;
......@@ -134,18 +132,16 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (o_direct) {
size_t length = iov_iter_count(from);
loff_t pos = iocb->ki_pos;
blk_start_plug(&plug);
/* check whether we do a DIO overwrite or not */
if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
!file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
pos + length <= i_size_read(inode)) {
struct ext4_map_blocks map;
unsigned int blkbits = inode->i_blkbits;
int err, len;
map.m_lblk = pos >> blkbits;
map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
- map.m_lblk;
map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
len = map.m_len;
err = ext4_map_blocks(NULL, inode, &map, 0);
......@@ -171,8 +167,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret > 0)
ret = generic_write_sync(iocb, ret);
if (o_direct)
blk_finish_plug(&plug);
return ret;
......
......@@ -61,6 +61,13 @@ static int ext4_sync_parent(struct inode *inode)
break;
iput(inode);
inode = next;
/*
* The directory inode may have gone through rmdir by now. But
* the inode itself and its blocks are still allocated (we hold
* a reference to the inode so it didn't go through
* ext4_evict_inode()) and so we are safe to flush metadata
* blocks and the inode.
*/
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
......@@ -107,7 +114,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!journal) {
ret = __generic_file_fsync(file, start, end, datasync);
if (!ret && !hlist_empty(&inode->i_dentry))
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
goto issue_flush;
......
......@@ -802,7 +802,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
} else
inode_init_owner(inode, dir, mode);
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
if (ext4_has_feature_project(sb) &&
ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
ei->i_projid = EXT4_I(dir)->i_projid;
else
......
......@@ -647,11 +647,19 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed.
* use them before they are really zeroed. We also have to
* unmap metadata before zeroing as otherwise writeback can
* overwrite zeros with stale data from block device.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ext4_lblk_t i;
for (i = 0; i < map->m_len; i++) {
unmap_underlying_metadata(inode->i_sb->s_bdev,
map->m_pblk + i);
}
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
......@@ -1649,6 +1657,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
if (invalidate) {
if (page_mapped(page))
clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
}
......@@ -3526,35 +3536,31 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
int unlocked = 0;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
if (ext4_should_dioread_nolock(inode)) {
/*
* Nolock dioread optimization may be dynamically disabled
* via ext4_inode_block_unlocked_dio(). Check inode's state
* while holding extra i_dio_count ref.
* Shared inode_lock is enough for us - it protects against concurrent
* writes & truncates and since we take care of writing back page cache,
* we are protected against page writeback as well.
*/
inode_dio_begin(inode);
smp_mb();
if (unlikely(ext4_test_inode_state(inode,
EXT4_STATE_DIOREAD_LOCK)))
inode_dio_end(inode);
else
unlocked = 1;
}
inode_lock_shared(inode);
if (IS_DAX(inode)) {
ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block,
NULL, unlocked ? 0 : DIO_LOCKING);
ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
} else {
size_t count = iov_iter_count(iter);
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count);
if (ret)
goto out_unlock;
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, ext4_dio_get_block,
NULL, NULL,
unlocked ? 0 : DIO_LOCKING);
NULL, NULL, 0);
}
if (unlocked)
inode_dio_end(inode);
out_unlock:
inode_unlock_shared(inode);
return ret;
}
......@@ -3890,7 +3896,7 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
}
/*
* ext4_punch_hole: punches a hole in a file by releaseing the blocks
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
* @inode: File inode
......@@ -3919,7 +3925,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
......@@ -4414,7 +4420,7 @@ static inline void ext4_iget_extra_inode(struct inode *inode,
int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_PROJECT))
if (!ext4_has_feature_project(inode->i_sb))
return -EOPNOTSUPP;
*projid = EXT4_I(inode)->i_projid;
return 0;
......@@ -4481,7 +4487,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) &&
if (ext4_has_feature_project(sb) &&
EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
......@@ -4814,14 +4820,14 @@ static int ext4_do_update_inode(handle_t *handle,
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if (!ei->i_dtime) {
if (ei->i_dtime && list_empty(&ei->i_orphan)) {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
} else {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(i_gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
......@@ -4885,8 +4891,7 @@ static int ext4_do_update_inode(handle_t *handle,
}
}
BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_PROJECT) &&
BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
i_projid != EXT4_DEF_PROJID);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
......
......@@ -19,8 +19,6 @@
#include "ext4_jbd2.h"
#include "ext4.h"
#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
/**
* Swap memory between @a and @b for @len bytes.
*
......@@ -310,8 +308,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
struct ext4_inode *raw_inode;
struct dquot *transfer_to[MAXQUOTAS] = { };
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_PROJECT)) {
if (!ext4_has_feature_project(sb)) {
if (projid != EXT4_DEF_PROJID)
return -EOPNOTSUPP;
else
......@@ -772,6 +769,9 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct fscrypt_policy policy;
if (!ext4_has_feature_encrypt(sb))
return -EOPNOTSUPP;
if (copy_from_user(&policy,
(struct fscrypt_policy __user *)arg,
sizeof(policy)))
......@@ -842,8 +842,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ext4_get_inode_flags(ei);
fa.fsx_xflags = ext4_iflags_to_xflags(ei->i_flags & EXT4_FL_USER_VISIBLE);
if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_RO_COMPAT_PROJECT)) {
if (ext4_has_feature_project(inode->i_sb)) {
fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
EXT4_I(inode)->i_projid);
}
......
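With the new check, EXT4_IOC_SET_ENCRYPTION_POLICY fails cleanly on filesystems lacking the encrypt feature. A hypothetical userspace probe, using the fscrypt_policy layout from the linux/fs.h UAPI of this era (treat the field names as assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/types.h>

int set_policy(int dirfd, const __u8 desc[8])
{
        struct fscrypt_policy policy;

        memset(&policy, 0, sizeof(policy));
        policy.version = 0;
        policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
        policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
        policy.flags = 0;
        memcpy(policy.master_key_descriptor, desc,
               sizeof(policy.master_key_descriptor));

        /* now returns -1 with errno == EOPNOTSUPP when the feature is off */
        return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}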
......@@ -598,6 +598,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
return -EOPNOTSUPP;
}
if (ext4_encrypted_inode(orig_inode) ||
ext4_encrypted_inode(donor_inode)) {
ext4_msg(orig_inode->i_sb, KERN_ERR,
"Online defrag not supported for encrypted files");
return -EOPNOTSUPP;
}
/* Protect orig and donor inodes against a truncate */
lock_two_nondirectories(orig_inode, donor_inode);
......
......@@ -639,7 +639,7 @@ static struct stats dx_show_leaf(struct inode *dir,
res = fscrypt_fname_alloc_buffer(
dir, len,
&fname_crypto_str);
if (res < 0)
if (res)
printk(KERN_WARNING "Error "
"allocating crypto "
"buffer--skipping "
......@@ -647,7 +647,7 @@ static struct stats dx_show_leaf(struct inode *dir,
res = fscrypt_fname_disk_to_usr(dir,
0, 0, &de_name,
&fname_crypto_str);
if (res < 0) {
if (res) {
printk(KERN_WARNING "Error "
"converting filename "
"from disk to usr"
......@@ -1011,7 +1011,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
err = fscrypt_fname_disk_to_usr(dir, hinfo->hash,
hinfo->minor_hash, &de_name,
&fname_crypto_str);
if (err < 0) {
if (err) {
count = err;
goto errout;
}
......@@ -2044,33 +2044,31 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
bh = bh2;
retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (retval)
goto out_frames;
retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
retval = ext4_handle_dirty_dirent_node(handle, dir, bh2);
if (retval)
goto out_frames;
de = do_split(handle,dir, &bh, frame, &fname->hinfo);
de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
if (IS_ERR(de)) {
retval = PTR_ERR(de);
goto out_frames;
}
dx_release(frames);
retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
brelse(bh);
return retval;
retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
out_frames:
/*
* Even if the block split failed, we have to properly write
* out all the changes we did so far. Otherwise we can end up
* with corrupted filesystem.
*/
if (retval)
ext4_mark_inode_dirty(handle, dir);
dx_release(frames);
brelse(bh2);
return retval;
}
......@@ -3144,7 +3142,7 @@ static int ext4_symlink(struct inode *dir,
istr.name = (const unsigned char *) symname;
istr.len = len;
err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
if (err < 0)
if (err)
goto err_drop_inode;
sd->len = cpu_to_le16(ostr.len);
disk_link.name = (char *) sd;
......
......@@ -405,14 +405,12 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
{
struct page *data_page = NULL;
struct inode *inode = page->mapping->host;
unsigned block_start, blocksize;
unsigned block_start;
struct buffer_head *bh, *head;
int ret = 0;
int nr_submitted = 0;
int nr_to_submit = 0;
blocksize = 1 << inode->i_blkbits;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
......
......@@ -78,6 +78,8 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
unsigned int journal_inum);
/*
* Lock ordering
......@@ -1267,7 +1269,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
Opt_lazytime, Opt_nolazytime,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
......@@ -1327,6 +1329,7 @@ static const match_table_t tokens = {
{Opt_noquota, "noquota"},
{Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_prjquota, "prjquota"},
{Opt_barrier, "barrier=%u"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
......@@ -1546,8 +1549,11 @@ static const struct mount_opts {
MOPT_SET | MOPT_Q},
{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
MOPT_SET | MOPT_Q},
{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
MOPT_SET | MOPT_Q},
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q},
EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
MOPT_CLEAR | MOPT_Q},
{Opt_usrjquota, 0, MOPT_Q},
{Opt_grpjquota, 0, MOPT_Q},
{Opt_offusrjquota, 0, MOPT_Q},
......@@ -1836,13 +1842,17 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
#ifdef CONFIG_QUOTA
if (ext4_has_feature_quota(sb) &&
(test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
"mount options ignored.");
clear_opt(sb, USRQUOTA);
clear_opt(sb, GRPQUOTA);
} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
return 0;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
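As the new comment spells out, prjquota is the only one of the three quota mount options gated on a superblock feature. A hypothetical mount call (device and mountpoint are placeholders):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /*
         * Assumes the filesystem carries the project feature
         * (EXT4_FEATURE_RO_COMPAT_PROJECT); otherwise parse_options()
         * rejects the option and the mount fails.
         */
        if (mount("/dev/sdb1", "/mnt", "ext4", 0, "prjquota") != 0) {
                perror("mount -o prjquota");
                return 1;
        }
        return 0;
}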
......@@ -2741,7 +2751,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
sb = elr->lr_super;
ngroups = EXT4_SB(sb)->s_groups_count;
sb_start_write(sb);
for (group = elr->lr_next_group; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp) {
......@@ -2768,8 +2777,6 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
elr->lr_next_sched = jiffies + elr->lr_timeout;
elr->lr_next_group = group + 1;
}
sb_end_write(sb);
return ret;
}
......@@ -2834,19 +2841,43 @@ static int ext4_lazyinit_thread(void *arg)
mutex_unlock(&eli->li_list_mtx);
goto exit_thread;
}
list_for_each_safe(pos, n, &eli->li_request_list) {
int err = 0;
int progress = 0;
elr = list_entry(pos, struct ext4_li_request,
lr_request);
if (time_after_eq(jiffies, elr->lr_next_sched)) {
if (ext4_run_li_request(elr) != 0) {
if (time_before(jiffies, elr->lr_next_sched)) {
if (time_before(elr->lr_next_sched, next_wakeup))
next_wakeup = elr->lr_next_sched;
continue;
}
if (down_read_trylock(&elr->lr_super->s_umount)) {
if (sb_start_write_trylock(elr->lr_super)) {
progress = 1;
/*
* We hold sb->s_umount, sb can not
* be removed from the list, it is
* now safe to drop li_list_mtx
*/
mutex_unlock(&eli->li_list_mtx);
err = ext4_run_li_request(elr);
sb_end_write(elr->lr_super);
mutex_lock(&eli->li_list_mtx);
n = pos->next;
}
up_read((&elr->lr_super->s_umount));
}
/* error, remove the lazy_init job */
if (err) {
ext4_remove_li_request(elr);
continue;
}
if (!progress) {
elr->lr_next_sched = jiffies +
(prandom_u32()
% (EXT4_DEF_LI_MAX_START_DELAY * HZ));
}
if (time_before(elr->lr_next_sched, next_wakeup))
next_wakeup = elr->lr_next_sched;
}
......@@ -3179,6 +3210,8 @@ int ext4_calculate_overhead(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
struct inode *j_inode;
unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
char *buf = (char *) get_zeroed_page(GFP_NOFS);
......@@ -3209,10 +3242,23 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
/* Add the internal journal blocks as well */
/*
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
j_inode = ext4_get_journal_inode(sb, j_inum);
if (j_inode) {
j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
overhead += EXT4_NUM_B2C(sbi, j_blocks);
iput(j_inode);
} else {
ext4_msg(sb, KERN_ERR, "can't get journal size");
}
}
sbi->s_overhead = overhead;
smp_wmb();
free_page((unsigned long) buf);
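A worked example of the new journal-overhead branch, assuming a 128 MiB journal inode on a 4 KiB-block filesystem without bigalloc (so EXT4_NUM_B2C() maps blocks to clusters one to one):

/*
 * j_blocks = j_inode->i_size >> sb->s_blocksize_bits
 *          = (128 << 20) >> 12
 *          = 32768 blocks
 *
 * overhead += EXT4_NUM_B2C(sbi, 32768) == 32768 clusters, matching
 * what sbi->s_journal->j_maxlen reports once the journal is loaded.
 */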
......@@ -4208,18 +4254,16 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
write_unlock(&journal->j_state_lock);
}
static journal_t *ext4_get_journal(struct super_block *sb,
static struct inode *ext4_get_journal_inode(struct super_block *sb,
unsigned int journal_inum)
{
struct inode *journal_inode;
journal_t *journal;
BUG_ON(!ext4_has_feature_journal(sb));
/* First, test for the existence of a valid inode on disk. Bad
* things happen if we iget() an unused inode, as the subsequent
* iput() will try to delete it. */
/*
* Test for the existence of a valid inode on disk. Bad things
* happen if we iget() an unused inode, as the subsequent iput()
* will try to delete it.
*/
journal_inode = ext4_iget(sb, journal_inum);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
......@@ -4239,6 +4283,20 @@ static journal_t *ext4_get_journal(struct super_block *sb,
iput(journal_inode);
return NULL;
}
return journal_inode;
}
static journal_t *ext4_get_journal(struct super_block *sb,
unsigned int journal_inum)
{
struct inode *journal_inode;
journal_t *journal;
BUG_ON(!ext4_has_feature_journal(sb));
journal_inode = ext4_get_journal_inode(sb, journal_inum);
if (!journal_inode)
return NULL;
journal = jbd2_journal_init_inode(journal_inode);
if (!journal) {
......@@ -5250,12 +5308,18 @@ static int ext4_enable_quotas(struct super_block *sb)
le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
};
bool quota_mopt[EXT4_MAXQUOTAS] = {
test_opt(sb, USRQUOTA),
test_opt(sb, GRPQUOTA),
test_opt(sb, PRJQUOTA),
};
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
for (type = 0; type < EXT4_MAXQUOTAS; type++) {
if (qf_inums[type]) {
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED);
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
ext4_warning(sb,
"Failed to enable quota tracking "
......
......@@ -30,7 +30,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
char *caddr, *paddr = NULL;
struct fscrypt_str cstr, pstr;
struct fscrypt_symlink_data *sd;
loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
int res;
u32 max_size = inode->i_sb->s_blocksize;
......@@ -49,7 +48,6 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
if (IS_ERR(cpage))
return ERR_CAST(cpage);
caddr = page_address(cpage);
caddr[size] = 0;
}
/* Symlink is encrypted */
......@@ -65,16 +63,14 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
if (res)
goto errout;
paddr = pstr.name;
res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
if (res < 0)
if (res)
goto errout;
paddr = pstr.name;
/* Null-terminate the name */
if (res <= pstr.len)
paddr[res] = '\0';
paddr[pstr.len] = '\0';
if (cpage)
put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
......
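The fix moves jbd2_might_wait_for_commit() out from under the j_state_lock read lock to the points that can actually block. For context, the helper is only a lockdep annotation, a fake acquire/release on the j_trans_commit_map initialized in journal_init_common(); the definition below is an assumption modeled on include/linux/jbd2.h of this era, not part of these hunks: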
......@@ -813,12 +813,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
int ret;
int err;
ret = fscrypt_fname_disk_to_usr(d->inode,
err = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0,
&de_name, fstr);
if (ret < 0)
if (err)
return true;
de_name = *fstr;
......
......@@ -454,7 +454,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
ostr.name = sd->encrypted_path;
ostr.len = disk_link.len;
err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
if (err < 0)
if (err)
goto err_out;
sd->len = cpu_to_le16(ostr.len);
......@@ -1051,7 +1051,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
goto errout;
res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
if (res < 0)
if (res)
goto errout;
/* this is broken symlink case */
......@@ -1063,7 +1063,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
paddr = pstr.name;
/* Null-terminate the name */
paddr[res] = '\0';
paddr[pstr.len] = '\0';
put_page(cpage);
set_delayed_call(done, kfree_link, paddr);
......
......@@ -1090,11 +1090,15 @@ static void jbd2_stats_proc_exit(journal_t *journal)
* very few fields yet: that has to wait until we have created the
* journal structures from from scratch, or loaded them from disk. */
static journal_t * journal_init_common (void)
static journal_t *journal_init_common(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int blocksize)
{
static struct lock_class_key jbd2_trans_commit_key;
journal_t *journal;
int err;
struct buffer_head *bh;
int n;
journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
......@@ -1131,6 +1135,32 @@ static journal_t * journal_init_common (void)
lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
&jbd2_trans_commit_key, 0);
/* journal descriptor can store up to n blocks -bzzz */
journal->j_blocksize = blocksize;
journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
n = journal->j_blocksize / sizeof(journal_block_tag_t);
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
GFP_KERNEL);
if (!journal->j_wbuf) {
kfree(journal);
return NULL;
}
bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
if (!bh) {
pr_err("%s: Cannot get buffer for journal superblock\n",
__func__);
kfree(journal->j_wbuf);
kfree(journal);
return NULL;
}
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
}
......@@ -1157,51 +1187,21 @@ static journal_t * journal_init_common (void)
* range of blocks on an arbitrary block device.
*
*/
journal_t * jbd2_journal_init_dev(struct block_device *bdev,
journal_t *jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int blocksize)
{
journal_t *journal = journal_init_common();
struct buffer_head *bh;
int n;
journal_t *journal;
journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
if (!journal)
return NULL;
/* journal descriptor can store up to n blocks -bzzz */
journal->j_blocksize = blocksize;
journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
bdevname(journal->j_dev, journal->j_devname);
strreplace(journal->j_devname, '/', '!');
jbd2_stats_proc_init(journal);
n = journal->j_blocksize / sizeof(journal_block_tag_t);
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
bh = __getblk(journal->j_dev, start, journal->j_blocksize);
if (!bh) {
printk(KERN_ERR
"%s: Cannot get buffer for journal superblock\n",
__func__);
goto out_err;
}
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
out_err:
kfree(journal->j_wbuf);
jbd2_stats_proc_exit(journal);
kfree(journal);
return NULL;
}
/**
......@@ -1212,67 +1212,36 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
* the journal. The inode must exist already, must support bmap() and
* must have all data blocks preallocated.
*/
journal_t * jbd2_journal_init_inode (struct inode *inode)
journal_t *jbd2_journal_init_inode(struct inode *inode)
{
struct buffer_head *bh;
journal_t *journal = journal_init_common();
journal_t *journal;
char *p;
int err;
int n;
unsigned long long blocknr;
blocknr = bmap(inode, 0);
if (!blocknr) {
pr_err("%s: Cannot locate journal superblock\n",
__func__);
return NULL;
}
jbd_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n",
inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev,
blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits,
inode->i_sb->s_blocksize);
if (!journal)
return NULL;
journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
journal->j_inode = inode;
bdevname(journal->j_dev, journal->j_devname);
p = strreplace(journal->j_devname, '/', '!');
sprintf(p, "-%lu", journal->j_inode->i_ino);
jbd_debug(1,
"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
journal, inode->i_sb->s_id, inode->i_ino,
(long long) inode->i_size,
inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
journal->j_blocksize = inode->i_sb->s_blocksize;
jbd2_stats_proc_init(journal);
/* journal descriptor can store up to n blocks -bzzz */
n = journal->j_blocksize / sizeof(journal_block_tag_t);
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
__func__);
goto out_err;
}
err = jbd2_journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
printk(KERN_ERR "%s: Cannot locate journal superblock\n",
__func__);
goto out_err;
}
bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh) {
printk(KERN_ERR
"%s: Cannot get buffer for journal superblock\n",
__func__);
goto out_err;
}
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
out_err:
kfree(journal->j_wbuf);
jbd2_stats_proc_exit(journal);
kfree(journal);
return NULL;
}
/*
......
......@@ -159,6 +159,7 @@ static void wait_transaction_locked(journal_t *journal)
read_unlock(&journal->j_state_lock);
if (need_to_start)
jbd2_log_start_commit(journal, tid);
jbd2_might_wait_for_commit(journal);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
}
......@@ -182,8 +183,6 @@ static int add_transaction_credits(journal_t *journal, int blocks,
int needed;
int total = blocks + rsv_blocks;
jbd2_might_wait_for_commit(journal);
/*
* If the current transaction is locked down for commit, wait
* for the lock to be released.
......@@ -214,6 +213,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
if (atomic_read(&journal->j_reserved_credits) + total >
journal->j_max_transaction_buffers) {
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + total <=
journal->j_max_transaction_buffers);
......@@ -238,6 +238,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
write_lock(&journal->j_state_lock);
if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
__jbd2_log_wait_for_space(journal);
......@@ -255,6 +256,7 @@ static int add_transaction_credits(journal_t *journal, int blocks,
sub_reserved_credits(journal, rsv_blocks);
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
wait_event(journal->j_wait_reserved,
atomic_read(&journal->j_reserved_credits) + rsv_blocks
<= journal->j_max_transaction_buffers / 2);
......
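As noted above, jbd2_might_wait_for_commit() is just a lockdep annotation; its assumed definition (modeled on include/linux/jbd2.h of this era, treat as a sketch):

#define jbd2_might_wait_for_commit(journal) \
        do { \
                rwsem_acquire(&journal->j_trans_commit_map, 0, 0, _THIS_IP_); \
                rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); \
        } while (0)

/* With lockdep enabled this records "may wait for a commit"; issuing it
 * while still holding j_state_lock is what produced the false positive,
 * hence the calls above sit after read_unlock(). */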
......@@ -366,7 +366,11 @@ struct mb_cache *mb_cache_create(int bucket_bits)
cache->c_shrink.count_objects = mb_cache_count;
cache->c_shrink.scan_objects = mb_cache_scan;
cache->c_shrink.seeks = DEFAULT_SEEKS;
register_shrinker(&cache->c_shrink);
if (register_shrinker(&cache->c_shrink)) {
kfree(cache->c_hash);
kfree(cache);
goto err_out;
}
INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
......
......@@ -10,28 +10,10 @@
#include <linux/cache.h>
#ifdef CONFIG_SMP
/*
* We want a power-of-two. Is there a better way than this?
*/
#if NR_CPUS >= 32
#define NR_BG_LOCKS 128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS 64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS 32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS 16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS 8
#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
#define NR_BG_LOCKS 4
#endif
#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
#endif /* CONFIG_SMP */
#endif
struct bgl_lock {
spinlock_t lock;
......@@ -49,14 +31,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl)
spin_lock_init(&bgl->locks[i].lock);
}
/*
* The accessor is a macro so we can embed a blockgroup_lock into different
* superblock types
*/
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}
#endif
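The closed-form definition can be checked against the removed ladder; ilog2() is the integer floor of log2, so:

/*
 * NR_CPUS                      : 1  2  3  4   8   16  32  64
 * old #if ladder               : 4  8  8  16  32  64  128 128
 * 4 << ilog2(min(NR_CPUS, 32)) : 4  8  8  16  32  64  128 128
 */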
......@@ -111,23 +111,6 @@ struct fscrypt_completion_result {
struct fscrypt_completion_result ecr = { \
COMPLETION_INITIALIZER((ecr).completion), 0 }
static inline int fscrypt_key_size(int mode)
{
switch (mode) {
case FS_ENCRYPTION_MODE_AES_256_XTS:
return FS_AES_256_XTS_KEY_SIZE;
case FS_ENCRYPTION_MODE_AES_256_GCM:
return FS_AES_256_GCM_KEY_SIZE;
case FS_ENCRYPTION_MODE_AES_256_CBC:
return FS_AES_256_CBC_KEY_SIZE;
case FS_ENCRYPTION_MODE_AES_256_CTS:
return FS_AES_256_CTS_KEY_SIZE;
default:
BUG();
}
return 0;
}
#define FS_FNAME_NUM_SCATTER_ENTRIES 4
#define FS_CRYPTO_BLOCK_SIZE 16
#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
......@@ -202,13 +185,6 @@ static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
}
static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
{
if (size == fscrypt_key_size(mode))
return size;
return 0;
}
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
if (str->len == 1 && str->name[0] == '.')
......