Commit 9f67672a authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "New features for ext4 this cycle include support for encrypted
  casefold, ensure that deleted file names are cleared in directory
  blocks by zeroing directory entries when they are unlinked or moved as
  part of a hash tree node split. We also improve the block allocator's
  performance on a freshly mounted file system by prefetching block
  bitmaps.

  There are also the usual cleanups and bug fixes, including fixing a
  page cache invalidation race when there is mixed buffered and direct
  I/O and the block size is less than the page size, and allowing the
  dax flag to be set and cleared on inline directories"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (32 commits)
  ext4: wipe ext4_dir_entry2 upon file deletion
  ext4: Fix occasional generic/418 failure
  fs: fix reporting supported extra file attributes for statx()
  ext4: allow the dax flag to be set and cleared on inline directories
  ext4: fix debug format string warning
  ext4: fix trailing whitespace
  ext4: fix various seppling typos
  ext4: fix error return code in ext4_fc_perform_commit()
  ext4: annotate data race in jbd2_journal_dirty_metadata()
  ext4: annotate data race in start_this_handle()
  ext4: fix ext4_error_err save negative errno into superblock
  ext4: fix error code in ext4_commit_super
  ext4: always panic when errors=panic is specified
  ext4: delete redundant uptodate check for buffer
  ext4: do not set SB_ACTIVE in ext4_orphan_cleanup()
  ext4: make prefetch_block_bitmaps default
  ext4: add proc files to monitor new structures
  ext4: improve cr 0 / cr 1 group scanning
  ext4: add MB_NUM_ORDERS macro
  ext4: add mballoc stats proc file
  ...
parents 6bab076a 6c091273
......@@ -121,6 +121,31 @@ The directory file type is one of the following values:
* - 0x7
- Symbolic link.
To support directories that are both encrypted and casefolded, we must
also include hash information in the directory entry. We append
``ext4_extended_dir_entry_2`` to ``ext4_dir_entry_2`` except for the entries
for dot and dotdot, which are kept the same. The structure follows immediately
after ``name`` and is included in the size listed by ``rec_len``. If a
directory entry uses this extension, it may be up to 271 bytes.
.. list-table::
   :widths: 8 8 24 40
   :header-rows: 1

   * - Offset
     - Size
     - Name
     - Description
   * - 0x0
     - \_\_le32
     - hash
     - The hash of the directory name
   * - 0x4
     - \_\_le32
     - minor\_hash
     - The minor hash of the directory name
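For orientation (an editor's sketch, not part of this diff): the documentation
calls this extension ``ext4_extended_dir_entry_2``, while the code models the
trailing words as a small hash struct. Its layout, matching the table above,
is roughly:

```c
/* Sketch of the hash words appended after `name` in a dirent of an
 * encrypted+casefolded directory; the tree's own definition lives in
 * fs/ext4/ext4.h (struct ext4_dir_entry_hash).
 */
struct ext4_dir_entry_hash {
	__le32 hash;		/* 0x0: major hash of the encrypted name */
	__le32 minor_hash;	/* 0x4: minor hash of the encrypted name */
};
```

With the 8-byte dirent header and a 255-byte name, these two words are what
take a hashed entry up to the 271-byte maximum quoted above.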
In order to add checksums to these classic directory blocks, a phony
``struct ext4_dir_entry`` is placed at the end of each leaf block to
hold the checksum. The directory entry is 12 bytes long. The inode
......@@ -322,6 +347,8 @@ The directory hash is one of the following values:
- Half MD4, unsigned.
* - 0x5
- Tea, unsigned.
* - 0x6
- Siphash.
Interior nodes of an htree are recorded as ``struct dx_node``, which is
also the full length of a data block:
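The struct itself is elided here; as a reader aid, its shape in
fs/ext4/namei.c is roughly the following (editor's sketch, comments
paraphrased):

```c
struct fake_dirent {
	__le32 inode;      /* 0, so the block parses as an empty dirent */
	__le16 rec_len;    /* spans the whole block */
	u8 name_len;
	u8 file_type;
};

struct dx_entry {
	__le32 hash;       /* lowest hash reachable through this entry */
	__le32 block;      /* logical block number of the child */
};

struct dx_node {
	struct fake_dirent fake;    /* lets non-htree code skip the block */
	struct dx_entry entries[];  /* entries[0] doubles as count/limit */
};
```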
......
......@@ -239,7 +239,7 @@ unsigned ext4_free_clusters_after_init(struct super_block *sb,
ext4_group_t block_group,
struct ext4_group_desc *gdp)
{
return num_clusters_in_group(sb, block_group) -
return num_clusters_in_group(sb, block_group) -
ext4_num_overhead_clusters(sb, block_group, gdp);
}
......
......@@ -55,6 +55,18 @@ static int is_dx_dir(struct inode *inode)
return 0;
}
static bool is_fake_dir_entry(struct ext4_dir_entry_2 *de)
{
/* Check if . or .. , or skip if namelen is 0 */
if ((de->name_len > 0) && (de->name_len <= 2) && (de->name[0] == '.') &&
(de->name[1] == '.' || de->name[1] == '\0'))
return true;
/* Check if this is a csum entry */
if (de->file_type == EXT4_FT_DIR_CSUM)
return true;
return false;
}
/*
* Return 0 if the directory entry is OK, and 1 if there is a problem
*
......@@ -73,16 +85,20 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
const int rlen = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
const int next_offset = ((char *) de - buf) + rlen;
bool fake = is_fake_dir_entry(de);
bool has_csum = ext4_has_metadata_csum(dir->i_sb);
if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
if (unlikely(rlen < ext4_dir_rec_len(1, fake ? NULL : dir)))
error_msg = "rec_len is smaller than minimal";
else if (unlikely(rlen % 4 != 0))
error_msg = "rec_len % 4 != 0";
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
else if (unlikely(rlen < ext4_dir_rec_len(de->name_len,
fake ? NULL : dir)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(next_offset > size))
error_msg = "directory entry overrun";
else if (unlikely(next_offset > size - EXT4_DIR_REC_LEN(1) &&
else if (unlikely(next_offset > size - ext4_dir_rec_len(1,
has_csum ? NULL : dir) &&
next_offset != size))
error_msg = "directory entry too close to block end";
else if (unlikely(le32_to_cpu(de->inode) >
......@@ -94,15 +110,15 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
"bad entry in directory: %s - offset=%u, "
"inode=%u, rec_len=%d, name_len=%d, size=%d",
"inode=%u, rec_len=%d, size=%d fake=%d",
error_msg, offset, le32_to_cpu(de->inode),
rlen, de->name_len, size);
rlen, size, fake);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
"bad entry in directory: %s - offset=%u, "
"inode=%u, rec_len=%d, name_len=%d, size=%d",
"inode=%u, rec_len=%d, size=%d fake=%d",
error_msg, offset, le32_to_cpu(de->inode),
rlen, de->name_len, size);
rlen, size, fake);
return 1;
}
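The ext4_dir_rec_len() helper used throughout this hunk generalizes the old
EXT4_DIR_REC_LEN() macro; passing NULL for dir yields the classic size. A
minimal sketch of the logic, assuming the tree's usual rounding constants
(see fs/ext4/ext4.h for the authoritative version):

```c
static inline bool ext4_hash_in_dirent(const struct inode *inode)
{
	/* Hashes ride in dirents only for encrypted+casefolded dirs. */
	return IS_CASEFOLDED(inode) && IS_ENCRYPTED(inode);
}

static inline unsigned int ext4_dir_rec_len(__u8 name_len,
					    const struct inode *dir)
{
	int rec_len = name_len + 8 + EXT4_DIR_ROUND;	/* classic layout */

	/* Fake entries (dot, dotdot, csum tail) never carry the hashes,
	 * hence the `fake ? NULL : dir` pattern in the caller above.
	 */
	if (dir && ext4_hash_in_dirent(dir))
		rec_len += sizeof(struct ext4_dir_entry_hash);
	return rec_len & ~EXT4_DIR_ROUND;
}
```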
......@@ -124,9 +140,9 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(file, ctx);
if (err != ERR_BAD_DX_DIR) {
if (err != ERR_BAD_DX_DIR)
return err;
}
/* Can we just clear INDEX flag to ignore htree information? */
if (!ext4_has_metadata_csum(sb)) {
/*
......@@ -224,7 +240,8 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
* failure will be detected in the
* dirent test below. */
if (ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
sb->s_blocksize) < ext4_dir_rec_len(1,
inode))
break;
i += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
......@@ -265,7 +282,9 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
/* Directory is encrypted */
err = fscrypt_fname_disk_to_usr(inode,
0, 0, &de_name, &fstr);
EXT4_DIRENT_HASH(de),
EXT4_DIRENT_MINOR_HASH(de),
&de_name, &fstr);
de_name = fstr;
fstr.len = save_len;
if (err)
......
......@@ -66,7 +66,7 @@
* Fast Commit Ineligibility
* -------------------------
* Not all operations are supported by fast commits today (e.g extended
* attributes). Fast commit ineligiblity is marked by calling one of the
* attributes). Fast commit ineligibility is marked by calling one of the
* two following functions:
*
* - ext4_fc_mark_ineligible(): This makes next fast commit operation to fall
......@@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
head.fc_tid = cpu_to_le32(
sbi->s_journal->j_running_transaction->t_tid);
if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
(u8 *)&head, &crc))
(u8 *)&head, &crc)) {
ret = -ENOSPC;
goto out;
}
}
spin_lock(&sbi->s_fc_lock);
......@@ -1734,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
}
/* Range is mapped and needs a state change */
jbd_debug(1, "Converting from %d to %d %lld",
jbd_debug(1, "Converting from %ld to %d %lld",
map.m_flags & EXT4_MAP_UNWRITTEN,
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
......
......@@ -371,15 +371,32 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
loff_t offset = iocb->ki_pos;
loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);
if (error)
return error;
if (size && flags & IOMAP_DIO_UNWRITTEN)
return ext4_convert_unwritten_extents(NULL, inode,
offset, size);
if (size && flags & IOMAP_DIO_UNWRITTEN) {
error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
if (error < 0)
return error;
}
/*
* If we are extending the file, we have to update i_size here before
* page cache gets invalidated in iomap_dio_rw(). Otherwise racing
* buffered reads could zero out too much from page cache pages. Update
* of on-disk size will happen later in ext4_dio_write_iter() where
* we have enough information to also perform orphan list handling etc.
* Note that we perform all extending writes synchronously under
* i_rwsem held exclusively so i_size update is safe here in that case.
* If the write was not extending, we cannot see pos > i_size here
* because operations reducing i_size like truncate wait for all
* outstanding DIO before updating i_size.
*/
pos += size;
if (pos > i_size_read(inode))
i_size_write(inode, pos);
return 0;
}
......
......@@ -197,7 +197,7 @@ static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
* represented, and whether or not the returned hash is 32 bits or 64
* bits. 32 bit hashes will return 0 for the minor hash.
*/
static int __ext4fs_dirhash(const char *name, int len,
static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
struct dx_hash_info *hinfo)
{
__u32 hash;
......@@ -259,6 +259,22 @@ static int __ext4fs_dirhash(const char *name, int len,
hash = buf[0];
minor_hash = buf[1];
break;
case DX_HASH_SIPHASH:
{
struct qstr qname = QSTR_INIT(name, len);
__u64 combined_hash;
if (fscrypt_has_encryption_key(dir)) {
combined_hash = fscrypt_fname_siphash(dir, &qname);
} else {
ext4_warning_inode(dir, "Siphash requires key");
return -1;
}
hash = (__u32)(combined_hash >> 32);
minor_hash = (__u32)combined_hash;
break;
}
default:
hinfo->hash = 0;
return -1;
......@@ -280,7 +296,8 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
unsigned char *buff;
struct qstr qstr = {.name = name, .len = len };
if (len && IS_CASEFOLDED(dir) && um) {
if (len && IS_CASEFOLDED(dir) && um &&
(!IS_ENCRYPTED(dir) || fscrypt_has_encryption_key(dir))) {
buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
if (!buff)
return -ENOMEM;
......@@ -291,12 +308,12 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
goto opaque_seq;
}
r = __ext4fs_dirhash(buff, dlen, hinfo);
r = __ext4fs_dirhash(dir, buff, dlen, hinfo);
kfree(buff);
return r;
}
opaque_seq:
#endif
return __ext4fs_dirhash(name, len, hinfo);
return __ext4fs_dirhash(dir, name, len, hinfo);
}
......@@ -1292,7 +1292,8 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
ei->i_extra_isize = sbi->s_want_extra_isize;
ei->i_inline_off = 0;
if (ext4_has_feature_inline_data(sb))
if (ext4_has_feature_inline_data(sb) &&
(!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
ret = inode;
err = dquot_alloc_inode(inode);
......@@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
handle_t *handle;
ext4_fsblk_t blk;
int num, ret = 0, used_blks = 0;
unsigned long used_inos = 0;
/* This should not happen, but just to be sure check this */
if (sb_rdonly(sb)) {
......@@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
* used inodes so we need to skip blocks with used inodes in
* inode table.
*/
if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
ext4_itable_unused_count(sb, gdp)),
sbi->s_inodes_per_block);
if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
ext4_itable_unused_count(sb, gdp)) <
EXT4_FIRST_INO(sb)))) {
ext4_error(sb, "Something is wrong with group %u: "
"used itable blocks: %d; "
"itable unused count: %u",
group, used_blks,
ext4_itable_unused_count(sb, gdp));
ret = 1;
goto err_out;
if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
used_inos = EXT4_INODES_PER_GROUP(sb) -
ext4_itable_unused_count(sb, gdp);
used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
/* Bogus inode unused count? */
if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
ext4_error(sb, "Something is wrong with group %u: "
"used itable blocks: %d; "
"itable unused count: %u",
group, used_blks,
ext4_itable_unused_count(sb, gdp));
ret = 1;
goto err_out;
}
used_inos += group * EXT4_INODES_PER_GROUP(sb);
/*
* Are there some uninitialized inodes in the inode table
* before the first normal inode?
*/
if ((used_blks != sbi->s_itb_per_group) &&
(used_inos < EXT4_FIRST_INO(sb))) {
ext4_error(sb, "Something is wrong with group %u: "
"itable unused count: %u; "
"itables initialized count: %ld",
group, ext4_itable_unused_count(sb, gdp),
used_inos);
ret = 1;
goto err_out;
}
}
blk = ext4_inode_table(sb, gdp) + used_blks;
......
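A worked instance of the arithmetic above (illustrative numbers, not from the
patch):

```c
/* 4 KiB blocks, 256-byte inodes => s_inodes_per_block == 16.
 * With EXT4_INODES_PER_GROUP(sb) == 8192 and itable_unused == 8000:
 *   used_inos = 8192 - 8000 = 192
 *   used_blks = DIV_ROUND_UP(192, 16) = 12   (<= s_itb_per_group == 512)
 * For group 0, used_inos is then biased by 0 * 8192 and must not fall
 * below EXT4_FIRST_INO(sb) (usually 11) unless the whole table is
 * initialized -- otherwise the unused count is bogus and we error out.
 */
```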
......@@ -705,7 +705,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a conventient checkpoint to make
* be able to restart the transaction at a convenient checkpoint to make
* sure we don't overflow the journal.
*
* Try to extend this transaction for the purposes of truncation. If
......
......@@ -795,7 +795,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
* clear the inode state safely.
* 2. The inode has inline data, then we need to read the data, make it
* update and dirty so that ext4_da_writepages can handle it. We don't
* need to start the journal since the file's metatdata isn't changed now.
* need to start the journal since the file's metadata isn't changed now.
*/
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
......@@ -1031,7 +1031,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err)
return err;
ext4_insert_dentry(inode, de, inline_size, fname);
ext4_insert_dentry(dir, inode, de, inline_size, fname);
ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
......@@ -1100,7 +1100,7 @@ static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
int new_size = get_max_inline_xattr_value_size(dir, iloc);
if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
if (new_size - old_size <= ext4_dir_rec_len(1, NULL))
return -ENOSPC;
ret = ext4_update_inline_data(handle, dir,
......@@ -1380,8 +1380,8 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
fake.name_len = 1;
strcpy(fake.name, ".");
fake.rec_len = ext4_rec_len_to_disk(
EXT4_DIR_REC_LEN(fake.name_len),
inline_size);
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
de = &fake;
pos = EXT4_INLINE_DOTDOT_OFFSET;
......@@ -1390,8 +1390,8 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
fake.name_len = 2;
strcpy(fake.name, "..");
fake.rec_len = ext4_rec_len_to_disk(
EXT4_DIR_REC_LEN(fake.name_len),
inline_size);
ext4_dir_rec_len(fake.name_len, NULL),
inline_size);
ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
de = &fake;
pos = EXT4_INLINE_DOTDOT_SIZE;
......@@ -1406,7 +1406,12 @@ int ext4_inlinedir_to_tree(struct file *dir_file,
}
}
ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
if (ext4_hash_in_dirent(dir)) {
hinfo->hash = EXT4_DIRENT_HASH(de);
hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
} else {
ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
}
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
(hinfo->minor_hash < start_minor_hash)))
......@@ -1488,8 +1493,8 @@ int ext4_read_inline_dir(struct file *file,
* So we will use extra_offset and extra_size to indicate them
* during the inline dir iteration.
*/
dotdot_offset = EXT4_DIR_REC_LEN(1);
dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
dotdot_offset = ext4_dir_rec_len(1, NULL);
dotdot_size = dotdot_offset + ext4_dir_rec_len(2, NULL);
extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
extra_size = extra_offset + inline_size;
......@@ -1524,7 +1529,7 @@ int ext4_read_inline_dir(struct file *file,
* failure will be detected in the
* dirent test below. */
if (ext4_rec_len_from_disk(de->rec_len, extra_size)
< EXT4_DIR_REC_LEN(1))
< ext4_dir_rec_len(1, NULL))
break;
i += ext4_rec_len_from_disk(de->rec_len,
extra_size);
......
......@@ -1066,8 +1066,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
set_buffer_uptodate(bh);
}
continue;
}
......@@ -1092,8 +1091,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
set_buffer_uptodate(bh);
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
......@@ -3824,7 +3822,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
* starting from file offset 'from'. The range to be zero'd must
* be contained with in one block. If the specified range exceeds
* the end of the block it will be shortened to end of the block
* that cooresponds to 'from'
* that corresponds to 'from'
*/
static int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
......
......@@ -316,6 +316,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
static bool dax_compatible(struct inode *inode, unsigned int oldflags,
unsigned int flags)
{
/* Allow the DAX flag to be changed on inline directories */
if (S_ISDIR(inode->i_mode)) {
flags &= ~EXT4_INLINE_DATA_FL;
oldflags &= ~EXT4_INLINE_DATA_FL;
}
if (flags & EXT4_DAX_FL) {
if ((oldflags & EXT4_DAX_MUT_EXCL) ||
ext4_test_inode_state(inode,
......
......@@ -59,7 +59,7 @@
* by the stream allocator, which purpose is to pack requests
* as close each to other as possible to produce smooth I/O traffic
* We use locality group prealloc space for stream request.
* We can tune the same via /proc/fs/ext4/<parition>/stream_req
* We can tune the same via /proc/fs/ext4/<partition>/stream_req
*/
#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
......@@ -78,6 +78,23 @@
*/
#define MB_DEFAULT_MAX_INODE_PREALLOC 512
/*
* Number of groups to search linearly before performing group scanning
* optimization.
*/
#define MB_DEFAULT_LINEAR_LIMIT 4
/*
* Minimum number of groups that should be present in the file system to perform
* group scanning optimizations.
*/
#define MB_DEFAULT_LINEAR_SCAN_THRESHOLD 16
/*
* Number of valid buddy orders
*/
#define MB_NUM_ORDERS(sb) ((sb)->s_blocksize_bits + 2)
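A quick worked instance of the macro above (illustrative, not from the patch):

```c
/* With 4 KiB blocks, sb->s_blocksize_bits == 12, so
 * MB_NUM_ORDERS(sb) == 14: the buddy tracks free chunks of order
 * 0..13, i.e. from one block up to 2^13 blocks (32 MiB) per chunk.
 */
```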
struct ext4_free_data {
/* this links the free block information from sb_info */
struct list_head efd_list;
......@@ -161,11 +178,14 @@ struct ext4_allocation_context {
/* copy of the best found extent taken before preallocation efforts */
struct ext4_free_extent ac_f_ex;
ext4_group_t ac_last_optimal_group;
__u32 ac_groups_considered;
__u32 ac_flags; /* allocation hints */
__u16 ac_groups_scanned;
__u16 ac_groups_linear_remaining;
__u16 ac_found;
__u16 ac_tail;
__u16 ac_buddy;
__u16 ac_flags; /* allocation hints */
__u8 ac_status;
__u8 ac_criteria;
__u8 ac_2order; /* if request is to allocate 2^N blocks and
......
......@@ -32,7 +32,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
newext.ee_block = cpu_to_le32(lb->first_block);
newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
ext4_ext_store_pblock(&newext, lb->first_pblock);
/* Locking only for convinience since we are operating on temp inode */
/* Locking only for convenience since we are operating on temp inode */
down_write(&EXT4_I(inode)->i_data_sem);
path = ext4_find_extent(inode, lb->first_block, NULL, 0);
if (IS_ERR(path)) {
......@@ -43,8 +43,8 @@ static int finish_range(handle_t *handle, struct inode *inode,
/*
* Calculate the credit needed to inserting this extent
* Since we are doing this in loop we may accumalate extra
* credit. But below we try to not accumalate too much
* Since we are doing this in loop we may accumulate extra
* credit. But below we try to not accumulate too much
* of them by restarting the journal.
*/
needed = ext4_ext_calc_credits_for_single_extent(inode,
......
......@@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
wait_on_buffer(bh);
sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
return 1;
return -EIO;
return 0;
}
......
......@@ -215,6 +215,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
......@@ -263,6 +264,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(mb_stream_req),
ATTR_LIST(mb_group_prealloc),
ATTR_LIST(mb_max_inode_prealloc),
ATTR_LIST(mb_max_linear_groups),
ATTR_LIST(max_writeback_mb_bump),
ATTR_LIST(extent_max_zeroout_kb),
ATTR_LIST(trigger_fs_error),
......@@ -313,6 +315,7 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
EXT4_ATTR_FEATURE(encrypted_casefold);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
......@@ -330,6 +333,7 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
ATTR_LIST(encrypted_casefold),
NULL,
};
ATTRIBUTE_GROUPS(ext4_feat);
......@@ -528,6 +532,10 @@ int ext4_register_sysfs(struct super_block *sb)
ext4_fc_info_show, sb);
proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_ops, sb);
proc_create_single_data("mb_stats", 0444, sbi->s_proc,
ext4_seq_mb_stats_show, sb);
proc_create_seq_data("mb_structs_summary", 0444, sbi->s_proc,
&ext4_mb_seq_structs_summary_ops, sb);
}
return 0;
}
......
......@@ -45,16 +45,13 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
void *addr;
page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
NULL);
if (IS_ERR(page))
return PTR_ERR(page);
addr = kmap_atomic(page);
memcpy(buf, addr + offset_in_page(pos), n);
kunmap_atomic(addr);
memcpy_from_page(buf, page, offset_in_page(pos), n);
put_page(page);
......@@ -80,7 +77,6 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
PAGE_SIZE - offset_in_page(pos));
struct page *page;
void *fsdata;
void *addr;
int res;
res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
......@@ -88,9 +84,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
if (res)
return res;
addr = kmap_atomic(page);
memcpy(addr + offset_in_page(pos), buf, n);
kunmap_atomic(addr);
memcpy_to_page(page, offset_in_page(pos), buf, n);
res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
page, fsdata);
......
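The kmap_atomic()/memcpy()/kunmap_atomic() triples removed above are folded
into helpers; their shape is roughly the following (editor's paraphrase of
the linux/highmem.h wrappers, modulo the exact kmap flavor and cache
flushing):

```c
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	memcpy(to + offset, from, len);
	kunmap_local(to);
}
```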
......@@ -1617,7 +1617,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
* If storing the value in an external inode is an option,
* reserve space for xattr entries/names in the external
* attribute block so that a long value does not occupy the
* whole space and prevent futher entries being added.
* whole space and prevent further entries being added.
*/
if (ext4_has_feature_ea_inode(inode->i_sb) &&
new_size && is_block &&
......
......@@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
return 0;
while (next_fc_block <= journal->j_fc_last) {
jbd_debug(3, "Fast commit replay: next block %ld",
jbd_debug(3, "Fast commit replay: next block %ld\n",
next_fc_block);
err = jread(&bh, journal, next_fc_block);
if (err) {
jbd_debug(3, "Fast commit replay: read error");
jbd_debug(3, "Fast commit replay: read error\n");
break;
}
jbd_debug(3, "Processing fast commit blk with seq %d");
err = journal->j_fc_replay_callback(journal, bh, pass,
next_fc_block - journal->j_fc_first,
expected_commit_id);
......
......@@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
}
alloc_transaction:
if (!journal->j_running_transaction) {
/*
* This check is racy but it is just an optimization of allocating new
* transaction early if there are high chances we'll need it. If we
* guess wrong, we'll retry or free unused transaction.
*/
if (!data_race(journal->j_running_transaction)) {
/*
* If __GFP_FS is not present, then we may be being called from
* inside the fs writeback layer, so we MUST NOT fail.
......@@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* crucial to catch bugs so let's do a reliable check until the
* lockless handling is fully proven.
*/
if (jh->b_transaction != transaction &&
jh->b_next_transaction != transaction) {
if (data_race(jh->b_transaction != transaction &&
jh->b_next_transaction != transaction)) {
spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, jh->b_transaction == transaction ||
jh->b_next_transaction == transaction);
......@@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
}
if (jh->b_modified == 1) {
/* If it's in our transaction it must be in BJ_Metadata list. */
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
if (data_race(jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)) {
spin_lock(&jh->b_state_lock);
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)
......
......@@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
/* SB_NOATIME means filesystem supplies dummy atime value */
if (inode->i_sb->s_flags & SB_NOATIME)
stat->result_mask &= ~STATX_ATIME;
/*
* Note: If you add another clause to set an attribute flag, please
* update attributes_mask below.
*/
if (IS_AUTOMOUNT(inode))
stat->attributes |= STATX_ATTR_AUTOMOUNT;
if (IS_DAX(inode))
stat->attributes |= STATX_ATTR_DAX;
stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
STATX_ATTR_DAX);
mnt_userns = mnt_user_ns(path->mnt);
if (inode->i_op->getattr)
return inode->i_op->getattr(mnt_userns, path, stat,
......
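With the mask now populated, userspace can tell "the file is not DAX" apart
from "this filesystem never reports DAX". A hedged userspace sketch
(hypothetical path; assumes glibc 2.28+ and kernel headers that define
STATX_ATTR_DAX):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/mnt/test/file", 0, STATX_BASIC_STATS, &stx))
		return 1;
	/* Only trust the DAX bit if the fs reports it as implemented. */
	if (stx.stx_attributes_mask & STATX_ATTR_DAX)
		printf("DAX %s\n",
		       (stx.stx_attributes & STATX_ATTR_DAX) ? "on" : "off");
	else
		printf("DAX attribute not reported by this fs\n");
	return 0;
}
```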
......@@ -61,7 +61,7 @@ void __jbd2_debug(int level, const char *file, const char *func,
#define jbd_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
#define jbd_debug(n, fmt, a...) /**/
#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
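Switching the stub from an empty expansion to no_printk() keeps the format
string and arguments visible to the compiler even when JBD2 debugging is
compiled out, so -Wformat catches exactly the class of bug removed in the
fc_do_one_pass() hunk above (a jbd_debug() with a %d but no argument):

```c
/* Under the old empty definition this compiled silently; with the
 * no_printk() stub the compiler now warns about the missing %d
 * argument, matching the buggy line deleted from recovery.c above.
 */
jbd_debug(3, "Processing fast commit blk with seq %d");
```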
extern void *jbd2_alloc(size_t size, gfp_t flags);
......@@ -594,18 +594,22 @@ struct transaction_s
*/
unsigned long t_log_start;
/* Number of buffers on the t_buffers list [j_list_lock] */
/*
* Number of buffers on the t_buffers list [j_list_lock, no locks
* needed for jbd2 thread]
*/
int t_nr_buffers;
/*
* Doubly-linked circular list of all buffers reserved but not yet
* modified by this transaction [j_list_lock]
* modified by this transaction [j_list_lock, no locks needed for
* jbd2 thread]
*/
struct journal_head *t_reserved_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
* transaction [j_list_lock]
* transaction [j_list_lock, no locks needed for jbd2 thread]
*/
struct journal_head *t_buffers;
......@@ -629,9 +633,11 @@ struct transaction_s
struct journal_head *t_checkpoint_io_list;
/*
* Doubly-linked circular list of metadata buffers being shadowed by log
* IO. The IO buffers on the iobuf list and the shadow buffers on this
* list match each other one for one at all times. [j_list_lock]
* Doubly-linked circular list of metadata buffers being
* shadowed by log IO. The IO buffers on the iobuf list and
* the shadow buffers on this list match each other one for
* one at all times. [j_list_lock, no locks needed for jbd2
* thread]
*/
struct journal_head *t_shadow_list;
......@@ -768,7 +774,8 @@ enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
struct journal_s
{
/**
* @j_flags: General journaling state flags [j_state_lock]
* @j_flags: General journaling state flags [j_state_lock,
* no lock for quick racy checks]
*/
unsigned long j_flags;
......@@ -808,7 +815,8 @@ struct journal_s
/**
* @j_barrier_count:
*
* Number of processes waiting to create a barrier lock [j_state_lock]
* Number of processes waiting to create a barrier lock [j_state_lock,
* no lock for quick racy checks]
*/
int j_barrier_count;
......@@ -821,7 +829,8 @@ struct journal_s
* @j_running_transaction:
*
* Transactions: The current running transaction...
* [j_state_lock] [caller holding open handle]
* [j_state_lock, no lock for quick racy checks] [caller holding
* open handle]
*/
transaction_t *j_running_transaction;
......@@ -1033,7 +1042,7 @@ struct journal_s
* @j_commit_sequence:
*
* Sequence number of the most recently committed transaction
* [j_state_lock].
* [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_sequence;
......@@ -1041,7 +1050,7 @@ struct journal_s
* @j_commit_request:
*
* Sequence number of the most recent transaction wanting commit
* [j_state_lock]
* [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_request;
......
......@@ -1358,64 +1358,6 @@ TRACE_EVENT(ext4_read_block_bitmap_load,
__entry->group, __entry->prefetch)
);
TRACE_EVENT(ext4_direct_IO_enter,
TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
TP_ARGS(inode, offset, len, rw),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned long, len )
__field( int, rw )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pos = offset;
__entry->len = len;
__entry->rw = rw;
),
TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->pos, __entry->len, __entry->rw)
);
TRACE_EVENT(ext4_direct_IO_exit,
TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
int rw, int ret),
TP_ARGS(inode, offset, len, rw, ret),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned long, len )
__field( int, rw )
__field( int, ret )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pos = offset;
__entry->len = len;
__entry->rw = rw;
__entry->ret = ret;
),
TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->pos, __entry->len,
__entry->rw, __entry->ret)
);
DECLARE_EVENT_CLASS(ext4__fallocate_mode,
TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
......@@ -1962,124 +1904,6 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
__entry->len, show_mflags(__entry->flags), __entry->ret)
);
TRACE_EVENT(ext4_ext_put_in_cache,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
ext4_fsblk_t start),
TP_ARGS(inode, lblk, len, start),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
__field( ext4_fsblk_t, start )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->lblk = lblk;
__entry->len = len;
__entry->start = start;
),
TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->lblk,
__entry->len,
(unsigned long long) __entry->start)
);
TRACE_EVENT(ext4_ext_in_cache,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
TP_ARGS(inode, lblk, ret),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( int, ret )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->lblk = lblk;
__entry->ret = ret;
),
TP_printk("dev %d,%d ino %lu lblk %u ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->lblk,
__entry->ret)
);
TRACE_EVENT(ext4_find_delalloc_range,
TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
int reverse, int found, ext4_lblk_t found_blk),
TP_ARGS(inode, from, to, reverse, found, found_blk),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, from )
__field( ext4_lblk_t, to )
__field( int, reverse )
__field( int, found )
__field( ext4_lblk_t, found_blk )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->from = from;
__entry->to = to;
__entry->reverse = reverse;
__entry->found = found;
__entry->found_blk = found_blk;
),
TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
"(blk = %u)",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->from, (unsigned) __entry->to,
__entry->reverse, __entry->found,
(unsigned) __entry->found_blk)
);
TRACE_EVENT(ext4_get_reserved_cluster_alloc,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
TP_ARGS(inode, lblk, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, lblk )
__field( unsigned int, len )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->lblk = lblk;
__entry->len = len;
),
TP_printk("dev %d,%d ino %lu lblk %u len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->lblk,
__entry->len)
);
TRACE_EVENT(ext4_ext_show_extent,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
unsigned short len),
......