Commit 1f63b9c1 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (36 commits)
  ext4: fix up rb_root initializations to use RB_ROOT
  ext4: Code cleanup for EXT4_IOC_MOVE_EXT ioctl
  ext4: Fix the NULL reference in double_down_write_data_sem()
  ext4: Fix insertion point of extent in mext_insert_across_blocks()
  ext4: consolidate in_range() definitions
  ext4: cleanup to use ext4_grp_offs_to_block()
  ext4: cleanup to use ext4_group_first_block_no()
  ext4: Release page references acquired in ext4_da_block_invalidatepages
  ext4: Fix ext4_quota_write cross block boundary behaviour
  ext4: Convert BUG_ON checks to use ext4_error() instead
  ext4: Use direct_IO_no_locking in ext4 dio read
  ext4: use ext4_get_block_write in buffer write
  ext4: mechanical rename some of the direct I/O get_block's identifiers
  ext4: make "offset" consistent in ext4_check_dir_entry()
  ext4: Handle non empty on-disk orphan link
  ext4: explicitly remove inode from orphan list after failed direct io
  ext4: fix error handling in migrate
  ext4: deprecate obsoleted mount options
  ext4: Fix fencepost error in choosing group vs file preallocation.
  jbd2: clean up an assertion in jbd2_journal_commit_transaction()
  ...
parents b24bc1e6 64e290ec
@@ -97,8 +97,8 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 	/* If checksum is bad mark all blocks used to prevent allocation
 	 * essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-		ext4_error(sb, __func__,
-			   "Checksum bad for group %u", block_group);
+		ext4_error(sb, "Checksum bad for group %u",
+			   block_group);
 		ext4_free_blks_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
 		ext4_itable_unused_set(sb, gdp, 0);
@@ -130,8 +130,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 		 * to make sure we calculate the right free blocks
 		 */
 		group_blocks = ext4_blocks_count(sbi->s_es) -
-			le32_to_cpu(sbi->s_es->s_first_data_block) -
-			(EXT4_BLOCKS_PER_GROUP(sb) * (ngroups - 1));
+			ext4_group_first_block_no(sb, ngroups - 1);
 	} else {
 		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
 	}
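Several hunks in this merge replace open-coded group-to-block arithmetic with ext4_group_first_block_no(). A minimal sketch of the equivalent computation, assuming the usual ext4 superblock fields (the helper name with the _sketch suffix is illustrative, not from this diff):

	/* Sketch only: first block of a block group, as the helper is
	 * expected to compute it from the on-disk superblock fields. */
	static inline ext4_fsblk_t group_first_block_no_sketch(struct super_block *sb,
							       ext4_group_t group)
	{
		return ((ext4_fsblk_t)group * EXT4_BLOCKS_PER_GROUP(sb)) +
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	}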
@@ -189,9 +188,6 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
  * when a file system is mounted (see ext4_fill_super).
  */
-
-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
 /**
  * ext4_get_group_desc() -- load group descriptor from disk
  * @sb:			super block
@@ -210,10 +206,8 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);

 	if (block_group >= ngroups) {
-		ext4_error(sb, "ext4_get_group_desc",
-			   "block_group >= groups_count - "
-			   "block_group = %u, groups_count = %u",
-			   block_group, ngroups);
+		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+			   " groups_count = %u", block_group, ngroups);

 		return NULL;
 	}
@@ -221,8 +215,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 	if (!sbi->s_group_desc[group_desc]) {
-		ext4_error(sb, "ext4_get_group_desc",
-			   "Group descriptor not loaded - "
+		ext4_error(sb, "Group descriptor not loaded - "
 			   "block_group = %u, group_desc = %u, desc = %u",
 			   block_group, group_desc, offset);
 		return NULL;
@@ -282,9 +275,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
 	return 1;

 err_out:
-	ext4_error(sb, __func__,
-			"Invalid block bitmap - "
-			"block_group = %d, block = %llu",
+	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
 			block_group, bitmap_blk);
 	return 0;
 }
@@ -311,8 +302,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 	bitmap_blk = ext4_block_bitmap(sb, desc);
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
-		ext4_error(sb, __func__,
-			    "Cannot read block bitmap - "
+		ext4_error(sb, "Cannot read block bitmap - "
 			    "block_group = %u, block_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
@@ -354,8 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 	set_bitmap_uptodate(bh);
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
-		ext4_error(sb, __func__,
-			    "Cannot read block bitmap - "
+		ext4_error(sb, "Cannot read block bitmap - "
 			    "block_group = %u, block_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
@@ -419,8 +408,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
 		     sbi->s_itb_per_group)) {
-		ext4_error(sb, __func__,
-			   "Adding blocks in system zones - "
+		ext4_error(sb, "Adding blocks in system zones - "
 			   "Block = %llu, count = %lu",
 			   block, count);
 		goto error_return;
@@ -453,8 +441,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 		BUFFER_TRACE(bitmap_bh, "clear bit");
 		if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
 						bit + i, bitmap_bh->b_data)) {
-			ext4_error(sb, __func__,
-				   "bit already cleared for block %llu",
+			ext4_error(sb, "bit already cleared for block %llu",
 				   (ext4_fsblk_t)(block + i));
 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
 		} else {
......
@@ -205,14 +205,14 @@ void ext4_release_system_zone(struct super_block *sb)
 		entry = rb_entry(n, struct ext4_system_zone, node);
 		kmem_cache_free(ext4_system_zone_cachep, entry);
 		if (!parent)
-			EXT4_SB(sb)->system_blks.rb_node = NULL;
+			EXT4_SB(sb)->system_blks = RB_ROOT;
 		else if (parent->rb_left == n)
 			parent->rb_left = NULL;
 		else if (parent->rb_right == n)
 			parent->rb_right = NULL;
 		n = parent;
 	}
-	EXT4_SB(sb)->system_blks.rb_node = NULL;
+	EXT4_SB(sb)->system_blks = RB_ROOT;
 }

 /*
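These hunks come from the "fix up rb_root initializations to use RB_ROOT" commit. A small standalone sketch of the idiom (reset_tree() is a hypothetical helper, not from this diff):

	/* Illustrative only: resetting an rb_root after tearing down its
	 * nodes. RB_ROOT is the canonical empty-tree initializer from
	 * <linux/rbtree.h>; assigning it is equivalent to, but clearer
	 * than, zeroing ->rb_node by hand. */
	#include <linux/rbtree.h>

	static void reset_tree(struct rb_root *root)
	{
		/* ... free all nodes first ... */
		*root = RB_ROOT;	/* instead of root->rb_node = NULL */
	}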
......
@@ -83,10 +83,12 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
 		error_msg = "inode out of bounds";

 	if (error_msg != NULL)
-		ext4_error(dir->i_sb, function,
-			"bad entry in directory #%lu: %s - "
-			"offset=%u, inode=%u, rec_len=%d, name_len=%d",
-			dir->i_ino, error_msg, offset,
+		__ext4_error(dir->i_sb, function,
+			"bad entry in directory #%lu: %s - block=%llu"
+			"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
+			dir->i_ino, error_msg,
+			(unsigned long long) bh->b_blocknr,
+			(unsigned) (offset%bh->b_size), offset,
 			le32_to_cpu(de->inode),
 			rlen, de->name_len);
 	return error_msg == NULL ? 1 : 0;
@@ -150,7 +152,7 @@ static int ext4_readdir(struct file *filp,
 			 */
 			if (!bh) {
 				if (!dir_has_error) {
-					ext4_error(sb, __func__, "directory #%lu "
+					ext4_error(sb, "directory #%lu "
 						   "contains a hole at offset %Lu",
 						   inode->i_ino,
 						   (unsigned long long) filp->f_pos);
@@ -303,7 +305,7 @@ static void free_rb_tree_fname(struct rb_root *root)
 			kfree(old);
 		}
 		if (!parent)
-			root->rb_node = NULL;
+			*root = RB_ROOT;
 		else if (parent->rb_left == n)
 			parent->rb_left = NULL;
 		else if (parent->rb_right == n)
......
@@ -53,6 +53,12 @@
 #define ext4_debug(f, a...)	do {} while (0)
 #endif

+#define EXT4_ERROR_INODE(inode, fmt, a...) \
+	ext4_error_inode(__func__, (inode), (fmt), ## a);
+
+#define EXT4_ERROR_FILE(file, fmt, a...)	\
+	ext4_error_file(__func__, (file), (fmt), ## a);
+
 /* data type for block offset of block group */
 typedef int ext4_grpblk_t;

@@ -133,14 +139,14 @@ struct mpage_da_data {
 	int pages_written;
 	int retval;
 };
-#define	DIO_AIO_UNWRITTEN	0x1
+#define	EXT4_IO_UNWRITTEN	0x1
 typedef struct ext4_io_end {
 	struct list_head	list;		/* per-file finished AIO list */
 	struct inode		*inode;		/* file being written to */
 	unsigned int		flag;		/* unwritten or not */
-	int			error;		/* I/O error code */
-	ext4_lblk_t		offset;		/* offset in the file */
-	size_t			size;		/* size of the extent */
+	struct page		*page;		/* page struct for buffer write */
+	loff_t			offset;		/* offset in the file */
+	ssize_t			size;		/* size of the extent */
 	struct work_struct	work;		/* data work queue */
 } ext4_io_end_t;
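The io_end structure is generalized here from AIO-DIO-only use to buffered writes as well (hence the page pointer and the renamed EXT4_IO_UNWRITTEN flag). A minimal, hypothetical sketch of how a completion path might consume it; the helper name is not from this diff:

	/* Sketch: defer unwritten-extent conversion to process context. */
	static void io_end_complete_sketch(ext4_io_end_t *io)
	{
		if (io->flag & EXT4_IO_UNWRITTEN)
			schedule_work(&io->work);	/* convert extents later */
		/* otherwise there is nothing to convert for this I/O */
	}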
@@ -284,10 +290,12 @@ struct flex_groups {
 #define EXT4_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
 #define EXT4_HUGE_FILE_FL		0x00040000 /* Set to each huge file */
 #define EXT4_EXTENTS_FL			0x00080000 /* Inode uses extents */
+#define EXT4_EA_INODE_FL		0x00200000 /* Inode used for large EA */
+#define EXT4_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
 #define EXT4_RESERVED_FL		0x80000000 /* reserved for ext4 lib */

-#define EXT4_FL_USER_VISIBLE		0x000BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE	0x000B80FF /* User modifiable flags */
+#define EXT4_FL_USER_VISIBLE		0x004BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE	0x004B80FF /* User modifiable flags */

 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
@@ -313,17 +321,6 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
 		return flags & EXT4_OTHER_FLMASK;
 }

-/*
- * Inode dynamic state flags
- */
-#define EXT4_STATE_JDATA		0x00000001 /* journaled data exists */
-#define EXT4_STATE_NEW			0x00000002 /* inode is newly created */
-#define EXT4_STATE_XATTR		0x00000004 /* has in-inode xattrs */
-#define EXT4_STATE_NO_EXPAND		0x00000008 /* No space for expansion */
-#define EXT4_STATE_DA_ALLOC_CLOSE	0x00000010 /* Alloc DA blks on close */
-#define EXT4_STATE_EXT_MIGRATE		0x00000020 /* Inode is migrating */
-#define EXT4_STATE_DIO_UNWRITTEN	0x00000040 /* need convert on dio done*/
-
 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
 	__u32 group;		/* Group number for this data */
@@ -364,19 +361,20 @@ struct ext4_new_group_data {
 	/* caller is from the direct IO path, request to creation of an
 	unitialized extents if not allocated, split the uninitialized
 	extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_DIO			0x0008
+#define EXT4_GET_BLOCKS_PRE_IO			0x0008
 #define EXT4_GET_BLOCKS_CONVERT			0x0010
-#define EXT4_GET_BLOCKS_DIO_CREATE_EXT		(EXT4_GET_BLOCKS_DIO|\
+#define EXT4_GET_BLOCKS_IO_CREATE_EXT		(EXT4_GET_BLOCKS_PRE_IO|\
+					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+	/* Convert extent to initialized after IO complete */
+#define EXT4_GET_BLOCKS_IO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
 					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
-	/* Convert extent to initialized after direct IO complete */
-#define EXT4_GET_BLOCKS_DIO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
-					 EXT4_GET_BLOCKS_DIO_CREATE_EXT)

 /*
  * Flags used by ext4_free_blocks
  */
 #define EXT4_FREE_BLOCKS_METADATA	0x0001
 #define EXT4_FREE_BLOCKS_FORGET		0x0002
+#define EXT4_FREE_BLOCKS_VALIDATED	0x0004

 /*
  * ioctl commands
@@ -630,7 +628,7 @@ struct ext4_inode_info {
 	 * near to their parent directory's inode.
 	 */
 	ext4_group_t	i_block_group;
-	__u32	i_state;		/* Dynamic state flags for ext4 */
+	unsigned long	i_state_flags;		/* Dynamic state flags */
 	ext4_lblk_t		i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
@@ -708,8 +706,9 @@ struct ext4_inode_info {
 	qsize_t i_reserved_quota;
 #endif

-	/* completed async DIOs that might need unwritten extents handling */
-	struct list_head i_aio_dio_complete_list;
+	/* completed IOs that might need unwritten extents handling */
+	struct list_head i_completed_io_list;
+	spinlock_t i_completed_io_lock;
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
@@ -760,6 +759,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
 #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
 #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
+#define EXT4_MOUNT_DIOREAD_NOLOCK	0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
 #define EXT4_MOUNT_I_VERSION		0x2000000 /* i_version support */
@@ -1050,6 +1050,34 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 		(ino >= EXT4_FIRST_INO(sb) &&
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
+
+/*
+ * Inode dynamic state flags
+ */
+enum {
+	EXT4_STATE_JDATA,		/* journaled data exists */
+	EXT4_STATE_NEW,			/* inode is newly created */
+	EXT4_STATE_XATTR,		/* has in-inode xattrs */
+	EXT4_STATE_NO_EXPAND,		/* No space for expansion */
+	EXT4_STATE_DA_ALLOC_CLOSE,	/* Alloc DA blks on close */
+	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
+	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
+};
+
+static inline int ext4_test_inode_state(struct inode *inode, int bit)
+{
+	return test_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
+
+static inline void ext4_set_inode_state(struct inode *inode, int bit)
+{
+	set_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
+
+static inline void ext4_clear_inode_state(struct inode *inode, int bit)
+{
+	clear_bit(bit, &EXT4_I(inode)->i_state_flags);
+}
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
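The inode state flags move from a hand-maintained bitmask in i_state to atomic bit operations on i_state_flags. Usage mirrors what the other hunks in this merge do (the wrapper function name here is only for illustration):

	static void state_usage_example(struct inode *inode)
	{
		/* old style: EXT4_I(inode)->i_state & EXT4_STATE_NEW */
		if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
			ext4_clear_inode_state(inode, EXT4_STATE_NEW);

		/* old style: EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE */
		ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	}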
@@ -1126,6 +1154,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 #define EXT4_FEATURE_INCOMPAT_64BIT		0x0080
 #define EXT4_FEATURE_INCOMPAT_MMP		0x0100
 #define EXT4_FEATURE_INCOMPAT_FLEX_BG		0x0200
+#define EXT4_FEATURE_INCOMPAT_EA_INODE		0x0400 /* EA in inode */
+#define EXT4_FEATURE_INCOMPAT_DIRDATA		0x1000 /* data in dirent */

 #define EXT4_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
 #define EXT4_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1439,7 +1469,7 @@ extern int ext4_block_truncate_page(handle_t *handle,
 		struct address_space *mapping, loff_t from);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
-extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern int flush_completed_IO(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
 					int used, int quota_claim);
 /* ioctl.c */
@@ -1465,13 +1495,20 @@ extern int ext4_group_extend(struct super_block *sb,
 				ext4_fsblk_t n_blocks_count);

 /* super.c */
-extern void ext4_error(struct super_block *, const char *, const char *, ...)
+extern void __ext4_error(struct super_block *, const char *, const char *, ...)
+	__attribute__ ((format (printf, 3, 4)));
+#define ext4_error(sb, message...)	__ext4_error(sb, __func__, ## message)
+extern void ext4_error_inode(const char *, struct inode *, const char *, ...)
+	__attribute__ ((format (printf, 3, 4)));
+extern void ext4_error_file(const char *, struct file *, const char *, ...)
 	__attribute__ ((format (printf, 3, 4)));
 extern void __ext4_std_error(struct super_block *, const char *, int);
 extern void ext4_abort(struct super_block *, const char *, const char *, ...)
 	__attribute__ ((format (printf, 3, 4)));
-extern void ext4_warning(struct super_block *, const char *, const char *, ...)
+extern void __ext4_warning(struct super_block *, const char *,
+			   const char *, ...)
 	__attribute__ ((format (printf, 3, 4)));
+#define ext4_warning(sb, message...)	__ext4_warning(sb, __func__, ## message)
 extern void ext4_msg(struct super_block *, const char *, const char *, ...)
 	__attribute__ ((format (printf, 3, 4)));
 extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
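This is the change that drives the many call-site cleanups above: ext4_error() and ext4_warning() become macros that supply __func__ to a double-underscore implementation, so callers stop passing the function name by hand. A standalone illustration of the pattern (names and the stderr target are assumptions, not kernel code):

	#include <stdarg.h>
	#include <stdio.h>

	/* Implementation takes the calling function's name explicitly. */
	static void __report_error(const char *func, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		fprintf(stderr, "error (%s): ", func);
		vfprintf(stderr, fmt, args);
		fputc('\n', stderr);
		va_end(args);
	}

	/* The macro captures __func__ at every call site. */
	#define report_error(fmt...)	__report_error(__func__, ##fmt)

	/* usage: report_error("Checksum bad for group %u", group); */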
@@ -1744,7 +1781,7 @@ extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
 			  loff_t len);
 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-			  loff_t len);
+			  ssize_t len);
 extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
 			   sector_t block, unsigned int max_blocks,
 			   struct buffer_head *bh, int flags);
@@ -1756,6 +1793,15 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 			     __u64 len, __u64 *moved_len);

+/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
+enum ext4_state_bits {
+	BH_Uninit	/* blocks are allocated but uninitialized on disk */
+	 = BH_JBDPrivateStart,
+};
+
+BUFFER_FNS(Uninit, uninit)
+TAS_BUFFER_FNS(Uninit, uninit)
+
 /*
  * Add new method to test wether block and inode bitmaps are properly
  * initialized. With uninit_bg reading the block from disk is not enough
@@ -1773,6 +1819,8 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
 	set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
 }

+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
+
 #endif	/* __KERNEL__ */
 #endif	/* _EXT4_H */
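The BH_Uninit bit added above rides in buffer_head->b_state; BUFFER_FNS()/TAS_BUFFER_FNS() generate the usual accessor family for it. A rough sketch of what those helpers amount to (the _sketch names are illustrative; the real definitions come from <linux/buffer_head.h>):

	static inline void set_buffer_uninit_sketch(struct buffer_head *bh)
	{
		set_bit(BH_Uninit, &bh->b_state);
	}

	static inline int buffer_uninit_sketch(struct buffer_head *bh)
	{
		return test_bit(BH_Uninit, &bh->b_state);
	}

	/* TAS_BUFFER_FNS adds a test-and-set variant for lock-free handoff. */
	static inline int test_set_buffer_uninit_sketch(struct buffer_head *bh)
	{
		return test_and_set_bit(BH_Uninit, &bh->b_state);
	}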
@@ -125,14 +125,14 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
 			ext4_journal_abort_handle(where, __func__, bh,
 						  handle, err);
 	} else {
-		if (inode && bh)
+		if (inode)
 			mark_buffer_dirty_inode(bh, inode);
 		else
 			mark_buffer_dirty(bh);
 		if (inode && inode_needs_sync(inode)) {
 			sync_dirty_buffer(bh);
 			if (buffer_req(bh) && !buffer_uptodate(bh)) {
-				ext4_error(inode->i_sb, __func__,
+				ext4_error(inode->i_sb,
 					   "IO error syncing inode, "
 					   "inode=%lu, block=%llu",
 					   inode->i_ino,
......
@@ -304,4 +304,28 @@ static inline int ext4_should_writeback_data(struct inode *inode)
 	return 0;
 }

+/*
+ * This function controls whether or not we should try to go down the
+ * dioread_nolock code paths, which makes it safe to avoid taking
+ * i_mutex for direct I/O reads.  This only works for extent-based
+ * files, and it doesn't work for nobh or if data journaling is
+ * enabled, since the dioread_nolock code uses b_private to pass
+ * information back to the I/O completion handler, and this conflicts
+ * with the jbd's use of b_private.
+ */
+static inline int ext4_should_dioread_nolock(struct inode *inode)
+{
+	if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
+		return 0;
+	if (test_opt(inode->i_sb, NOBH))
+		return 0;
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+		return 0;
+	if (ext4_should_journal_data(inode))
+		return 0;
+	return 1;
+}
+
 #endif	/* _EXT4_JBD2_H */
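A hedged sketch of how a write path might use the predicate above to pick a get_block callback; only the ext4_get_block and ext4_get_block_write names come from this merge's commit list, and the chooser function itself is hypothetical:

	static get_block_t *choose_get_block_sketch(struct inode *inode)
	{
		/* ext4_get_block_write allocates extents as unwritten so a
		 * lockless DIO read never sees stale block contents; they are
		 * converted to written once the data I/O has completed. */
		if (ext4_should_dioread_nolock(inode))
			return ext4_get_block_write;
		return ext4_get_block;
	}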
@@ -35,9 +35,9 @@
  */
 static int ext4_release_file(struct inode *inode, struct file *filp)
 {
-	if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
+	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
 		ext4_alloc_da_blocks(inode);
-		EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
+		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
 	}
 	/* if we are the last writer on the inode, drop the block reservation */
 	if ((filp->f_mode & FMODE_WRITE) &&
......
@@ -63,7 +63,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;

-	ret = flush_aio_dio_completed_IO(inode);
+	ret = flush_completed_IO(inode);
 	if (ret < 0)
 		return ret;
......
@@ -76,8 +76,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
 	/* If checksum is bad mark all blocks and inodes use to prevent
 	 * allocation, essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-		ext4_error(sb, __func__, "Checksum bad for group %u",
-			   block_group);
+		ext4_error(sb, "Checksum bad for group %u", block_group);
 		ext4_free_blks_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
 		ext4_itable_unused_set(sb, gdp, 0);
@@ -111,8 +110,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	bitmap_blk = ext4_inode_bitmap(sb, desc);
 	bh = sb_getblk(sb, bitmap_blk);
 	if (unlikely(!bh)) {
-		ext4_error(sb, __func__,
-			    "Cannot read inode bitmap - "
+		ext4_error(sb, "Cannot read inode bitmap - "
 			    "block_group = %u, inode_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
@@ -153,8 +151,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	set_bitmap_uptodate(bh);
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
-		ext4_error(sb, __func__,
-			    "Cannot read inode bitmap - "
+		ext4_error(sb, "Cannot read inode bitmap - "
 			    "block_group = %u, inode_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
@@ -229,8 +226,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	es = EXT4_SB(sb)->s_es;
 	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext4_error(sb, "ext4_free_inode",
-			   "reserved or nonexistent inode %lu", ino);
+		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
 		goto error_return;
 	}
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -248,8 +244,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
 					bit, bitmap_bh->b_data);
 	if (!cleared)
-		ext4_error(sb, "ext4_free_inode",
-			   "bit already cleared for inode %lu", ino);
+		ext4_error(sb, "bit already cleared for inode %lu", ino);
 	else {
 		gdp = ext4_get_group_desc(sb, block_group, &bh2);
@@ -736,8 +731,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
 			ino > EXT4_INODES_PER_GROUP(sb)) {
 		ext4_unlock_group(sb, group);
-		ext4_error(sb, __func__,
-			   "reserved inode or inode > inodes count - "
+		ext4_error(sb, "reserved inode or inode > inodes count - "
 			   "block_group = %u, inode=%lu", group,
 			   ino + group * EXT4_INODES_PER_GROUP(sb));
 		return 1;
@@ -904,7 +898,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
 		BUFFER_TRACE(inode_bitmap_bh,
 			"call ext4_handle_dirty_metadata");
 		err = ext4_handle_dirty_metadata(handle,
-						 inode,
+						 NULL,
 						 inode_bitmap_bh);
 		if (err)
 			goto fail;
@@ -1029,7 +1023,8 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);

-	ei->i_state = EXT4_STATE_NEW;
+	ei->i_state_flags = 0;
+	ext4_set_inode_state(inode, EXT4_STATE_NEW);

 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
@@ -1098,8 +1093,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 	/* Error cases - e2fsck has already cleaned up for us */
 	if (ino > max_ino) {
-		ext4_warning(sb, __func__,
-			     "bad orphan ino %lu! e2fsck was run?", ino);
+		ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
 		goto error;
 	}
@@ -1107,8 +1101,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh) {
-		ext4_warning(sb, __func__,
-			     "inode bitmap error for orphan %lu", ino);
+		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
 		goto error;
 	}
@@ -1140,8 +1133,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 	err = PTR_ERR(inode);
 	inode = NULL;
 bad_orphan:
-	ext4_warning(sb, __func__,
-		     "bad orphan inode %lu! e2fsck was run?", ino);
+	ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
 	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
 	       bit, (unsigned long long)bitmap_bh->b_blocknr,
 	       ext4_test_bit(bit, bitmap_bh->b_data));
......
@@ -92,6 +92,15 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 				flags &= ~EXT4_EXTENTS_FL;
 		}

+		if (flags & EXT4_EOFBLOCKS_FL) {
+			/* we don't support adding EOFBLOCKS flag */
+			if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
+				err = -EOPNOTSUPP;
+				goto flags_out;
+			}
+		} else if (oldflags & EXT4_EOFBLOCKS_FL)
+			ext4_truncate(inode);
+
 		handle = ext4_journal_start(inode, 1);
 		if (IS_ERR(handle)) {
 			err = PTR_ERR(handle);
@@ -249,7 +258,8 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (me.moved_len > 0)
 			file_remove_suid(donor_filp);

-		if (copy_to_user((struct move_extent *)arg, &me, sizeof(me)))
+		if (copy_to_user((struct move_extent __user *)arg,
+				 &me, sizeof(me)))
 			err = -EFAULT;
 mext_out:
 		fput(donor_filp);
......
@@ -441,10 +441,9 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 	for (i = 0; i < count; i++) {
 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
 			ext4_fsblk_t blocknr;
-			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 			blocknr += first + i;
-			blocknr +=
-			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 			ext4_grp_locked_error(sb, e4b->bd_group,
 				   __func__, "double-free of inode"
 				   " %lu's block %llu(bit %u in group %u)",
@@ -1255,10 +1254,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
 			ext4_fsblk_t blocknr;
-			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
 			blocknr += block;
-			blocknr +=
-			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 			ext4_grp_locked_error(sb, e4b->bd_group,
 				   __func__, "double-free of inode"
 				   " %lu's block %llu(bit %u in group %u)",
@@ -1631,7 +1629,6 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	int max;
 	int err;
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-	struct ext4_super_block *es = sbi->s_es;
 	struct ext4_free_extent ex;

 	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
@@ -1648,8 +1645,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
 		ext4_fsblk_t start;

-		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
-			ex.fe_start + le32_to_cpu(es->s_first_data_block);
+		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
+			ex.fe_start;
 		/* use do_div to get remainder (would be 64-bit modulo) */
 		if (do_div(start, sbi->s_stripe) == 0) {
 			ac->ac_found++;
@@ -1803,8 +1800,8 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 	BUG_ON(sbi->s_stripe == 0);

 	/* find first stripe-aligned block in group */
-	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
-		+ le32_to_cpu(sbi->s_es->s_first_data_block);
+	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
+
 	a = first_group_block + sbi->s_stripe - 1;
 	do_div(a, sbi->s_stripe);
 	i = (a * sbi->s_stripe) - first_group_block;
@@ -2256,7 +2253,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
 	init_rwsem(&meta_group_info[i]->alloc_sem);
-	meta_group_info[i]->bb_free_root.rb_node = NULL;
+	meta_group_info[i]->bb_free_root = RB_ROOT;

 #ifdef DOUBLE_CHECK
 	{
@@ -2560,12 +2557,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 		ext4_unlock_group(sb, entry->group);
 		if (test_opt(sb, DISCARD)) {
 			ext4_fsblk_t discard_block;
-			struct ext4_super_block *es = EXT4_SB(sb)->s_es;

-			discard_block = (ext4_fsblk_t)entry->group *
-						EXT4_BLOCKS_PER_GROUP(sb)
-					+ entry->start_blk
-					+ le32_to_cpu(es->s_first_data_block);
+			discard_block = entry->start_blk +
+				ext4_group_first_block_no(sb, entry->group);
 			trace_ext4_discard_blocks(sb,
 					(unsigned long long)discard_block,
 					entry->count);
@@ -2703,14 +2697,11 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	if (err)
 		goto out_err;

-	block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
-		+ ac->ac_b_ex.fe_start
-		+ le32_to_cpu(es->s_first_data_block);
+	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

 	len = ac->ac_b_ex.fe_len;
 	if (!ext4_data_block_valid(sbi, block, len)) {
-		ext4_error(sb, __func__,
-			   "Allocating blocks %llu-%llu which overlap "
+		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
 			   "fs metadata\n", block, block+len);
 		/* File system mounted not to panic on error
 		 * Fix the bitmap and repeat the block allocation
@@ -3161,9 +3152,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	/* The max size of hash table is PREALLOC_TB_SIZE */
 	order = PREALLOC_TB_SIZE - 1;

-	goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
-		     ac->ac_g_ex.fe_start +
-		     le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
+	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
 	/*
 	 * search for the prealloc space that is having
 	 * minimal distance from the goal block.
@@ -3526,8 +3515,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 		if (bit >= end)
 			break;
 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
-		start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
-				le32_to_cpu(sbi->s_es->s_first_data_block);
+		start = ext4_group_first_block_no(sb, group) + bit;
 		mb_debug(1, "    free preallocated %u/%u in group %u\n",
 			 (unsigned) start, (unsigned) next - bit,
 			 (unsigned) group);
@@ -3623,15 +3611,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (bitmap_bh == NULL) {
-		ext4_error(sb, __func__, "Error in reading block "
-				"bitmap for %u", group);
+		ext4_error(sb, "Error reading block bitmap for %u", group);
 		return 0;
 	}

 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		ext4_error(sb, __func__, "Error in loading buddy "
-				"information for %u", group);
+		ext4_error(sb, "Error loading buddy information for %u", group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -3804,15 +3790,15 @@ void ext4_discard_preallocations(struct inode *inode)

 		err = ext4_mb_load_buddy(sb, group, &e4b);
 		if (err) {
-			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %u", group);
+			ext4_error(sb, "Error loading buddy information for %u",
+					group);
 			continue;
 		}

 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (bitmap_bh == NULL) {
-			ext4_error(sb, __func__, "Error in reading block "
-					"bitmap for %u", group);
+			ext4_error(sb, "Error reading block bitmap for %u",
+					group);
 			ext4_mb_release_desc(&e4b);
 			continue;
 		}
@@ -3938,7 +3924,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)

 	/* don't use group allocation for large files */
 	size = max(size, isize);
-	if (size >= sbi->s_mb_stream_request) {
+	if (size > sbi->s_mb_stream_request) {
 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
 		return;
 	}
@@ -4077,8 +4063,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,

 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
-			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %u", group);
+			ext4_error(sb, "Error loading buddy information for %u",
+					group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -4476,9 +4462,9 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,

 	sbi = EXT4_SB(sb);
 	es = EXT4_SB(sb)->s_es;
-	if (!ext4_data_block_valid(sbi, block, count)) {
-		ext4_error(sb, __func__,
-			    "Freeing blocks not in datazone - "
+	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+	    !ext4_data_block_valid(sbi, block, count)) {
+		ext4_error(sb, "Freeing blocks not in datazone - "
 			    "block = %llu, count = %lu", block, count);
 		goto error_return;
 	}
@@ -4547,8 +4533,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
 		     EXT4_SB(sb)->s_itb_per_group)) {

-		ext4_error(sb, __func__,
-			   "Freeing blocks in system zone - "
+		ext4_error(sb, "Freeing blocks in system zone - "
 			   "Block = %llu, count = %lu", block, count);
 		/* err = 0. ext4_std_error should be a no op */
 		goto error_return;
......
@@ -220,16 +220,9 @@ struct ext4_buddy {
 #define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
 #define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)

-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
 static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
 					struct ext4_free_extent *fex)
 {
-	ext4_fsblk_t block;
-
-	block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
-			+ fex->fe_start
-			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-	return block;
+	return ext4_group_first_block_no(sb, fex->fe_group) + fex->fe_start;
 }
 #endif
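A worked example of the simplified ext4_grp_offs_to_block(), under an assumed geometry that is not taken from this diff (4 KiB blocks, EXT4_BLOCKS_PER_GROUP(sb) == 32768, s_first_data_block == 0):

	/*
	 * For fex->fe_group == 3 and fex->fe_start == 100:
	 *
	 *   ext4_group_first_block_no(sb, 3) = 0 + 3 * 32768 = 98304
	 *   ext4_grp_offs_to_block(sb, fex)  = 98304 + 100   = 98404
	 *
	 * With 1 KiB blocks, s_first_data_block is 1, so every result
	 * shifts up by one block.
	 */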
@@ -365,12 +365,12 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
 		 * happened after we started the migrate. We need to
 		 * fail the migrate
 		 */
-		if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) {
+		if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
 			retval = -EAGAIN;
 			up_write(&EXT4_I(inode)->i_data_sem);
 			goto err_out;
 		} else
-			EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 	/*
 	 * We have the extent map build with the tmp inode.
 	 * Now copy the i_data across
@@ -503,14 +503,10 @@ int ext4_ext_migrate(struct inode *inode)
 	}
 	i_size_write(tmp_inode, i_size_read(inode));
 	/*
-	 * We don't want the inode to be reclaimed
-	 * if we got interrupted in between. We have
-	 * this tmp inode carrying reference to the
-	 * data blocks of the original file. We set
-	 * the i_nlink to zero at the last stage after
-	 * switching the original file to extent format
+	 * Set the i_nlink to zero so it will be deleted later
+	 * when we drop inode reference.
 	 */
-	tmp_inode->i_nlink = 1;
+	tmp_inode->i_nlink = 0;

 	ext4_ext_tree_init(handle, tmp_inode);
 	ext4_orphan_add(handle, tmp_inode);
@@ -533,10 +529,20 @@ int ext4_ext_migrate(struct inode *inode)
 	 * allocation.
 	 */
 	down_read((&EXT4_I(inode)->i_data_sem));
-	EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE;
+	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 	up_read((&EXT4_I(inode)->i_data_sem));

 	handle = ext4_journal_start(inode, 1);
+	if (IS_ERR(handle)) {
+		/*
+		 * It is impossible to update on-disk structures without
+		 * a handle, so just rollback in-core changes and live other
+		 * work to orphan_list_cleanup()
+		 */
+		ext4_orphan_del(NULL, tmp_inode);
+		retval = PTR_ERR(handle);
+		goto out;
+	}
+
 	ei = EXT4_I(inode);
 	i_data = ei->i_data;
@@ -618,15 +624,8 @@ int ext4_ext_migrate(struct inode *inode)
 	/* Reset the extent details */
 	ext4_ext_tree_init(handle, tmp_inode);

-	/*
-	 * Set the i_nlink to zero so that
-	 * generic_drop_inode really deletes the
-	 * inode
-	 */
-	tmp_inode->i_nlink = 0;
-
 	ext4_journal_stop(handle);
+out:
 	unlock_new_inode(tmp_inode);
 	iput(tmp_inode);
......
@@ -152,12 +152,12 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2,
 	int ret = 0;

 	if (inode1 == NULL) {
-		ext4_error(inode2->i_sb, function,
+		__ext4_error(inode2->i_sb, function,
 			"Both inodes should not be NULL: "
 			"inode1 NULL inode2 %lu", inode2->i_ino);
 		ret = -EIO;
 	} else if (inode2 == NULL) {
-		ext4_error(inode1->i_sb, function,
+		__ext4_error(inode1->i_sb, function,
 			"Both inodes should not be NULL: "
 			"inode1 %lu inode2 NULL", inode1->i_ino);
 		ret = -EIO;
@@ -252,6 +252,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
 		}

 		o_start->ee_len = start_ext->ee_len;
+		eblock = le32_to_cpu(start_ext->ee_block);
 		new_flag = 1;

 	} else if (start_ext->ee_len && new_ext->ee_len &&
@@ -262,6 +263,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
 		 * orig  |------------------------------|
 		 */
 		o_start->ee_len = start_ext->ee_len;
+		eblock = le32_to_cpu(start_ext->ee_block);
 		new_flag = 1;

 	} else if (!start_ext->ee_len && new_ext->ee_len &&
@@ -475,7 +477,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
 	struct ext4_extent new_ext, start_ext, end_ext;
 	ext4_lblk_t new_ext_end;
-	ext4_fsblk_t new_phys_end;
 	int oext_alen, new_ext_alen, end_ext_alen;
 	int depth = ext_depth(orig_inode);
 	int ret;
@@ -489,7 +490,6 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	new_ext.ee_len = dext->ee_len;
 	new_ext_alen = ext4_ext_get_actual_len(&new_ext);
 	new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
-	new_phys_end = ext_pblock(&new_ext) + new_ext_alen - 1;

 	/*
 	 * Case: original extent is first
@@ -502,6 +502,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 		le32_to_cpu(oext->ee_block) + oext_alen) {
 		start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
 					       le32_to_cpu(oext->ee_block));
+		start_ext.ee_block = oext->ee_block;
 		copy_extent_status(oext, &start_ext);
 	} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
 		prev_ext = oext - 1;
@@ -515,6 +516,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 			start_ext.ee_len = cpu_to_le16(
 				ext4_ext_get_actual_len(prev_ext) +
 				new_ext_alen);
+			start_ext.ee_block = oext->ee_block;
 			copy_extent_status(prev_ext, &start_ext);
 			new_ext.ee_len = 0;
 		}
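The added start_ext.ee_block assignments restore the invariant that an extent being (re)inserted carries its logical start, not just a length and status. A hedged sketch of fully initializing an ext4_extent before insertion; the helper itself is illustrative, while ext4_ext_store_pblock() mirrors the ext4 extent API:

	static void init_extent_sketch(struct ext4_extent *ex, ext4_lblk_t lblock,
				       ext4_fsblk_t pblock, unsigned short len)
	{
		ex->ee_block = cpu_to_le32(lblock);	/* the field the fix sets */
		ex->ee_len = cpu_to_le16(len);
		ext4_ext_store_pblock(ex, pblock);	/* split 48-bit physical block */
	}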
@@ -526,7 +528,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
 	 * new_ext       |-------|
 	 */
 	if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
-		ext4_error(orig_inode->i_sb, __func__,
+		ext4_error(orig_inode->i_sb,
 			"new_ext_end(%u) should be less than or equal to "
 			"oext->ee_block(%u) + oext_alen(%d) - 1",
 			new_ext_end, le32_to_cpu(oext->ee_block),
@@ -689,12 +691,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
 	while (1) {
 		/* The extent for donor must be found. */
 		if (!dext) {
-			ext4_error(donor_inode->i_sb, __func__,
+			ext4_error(donor_inode->i_sb,
 				   "The extent for donor must be found");
 			*err = -EIO;
 			goto out;
 		} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
-			ext4_error(donor_inode->i_sb, __func__,
+			ext4_error(donor_inode->i_sb,
 				"Donor offset(%u) and the first block of donor "
 				"extent(%u) should be equal",
 				donor_off,
@@ -928,7 +930,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 }

 /**
- * mext_check_argumants - Check whether move extent can be done
+ * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:		original inode
 * @donor_inode:		donor inode
@@ -949,14 +951,6 @@ mext_check_arguments(struct inode *orig_inode,
 	unsigned int blkbits = orig_inode->i_blkbits;
 	unsigned int blocksize = 1 << blkbits;

-	/* Regular file check */
-	if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
-		ext4_debug("ext4 move extent: The argument files should be "
-			"regular file [ino:orig %lu, donor %lu]\n",
-			orig_inode->i_ino, donor_inode->i_ino);
-		return -EINVAL;
-	}
-
 	if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
 		ext4_debug("ext4 move extent: suid or sgid is set"
 			   " to donor file [ino:orig %lu, donor %lu]\n",
@@ -1204,6 +1198,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
 		return -EINVAL;
 	}

+	/* Regular file check */
+	if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
+		ext4_debug("ext4 move extent: The argument files should be "
+			"regular file [ino:orig %lu, donor %lu]\n",
+			orig_inode->i_ino, donor_inode->i_ino);
+		return -EINVAL;
+	}
+
 	/* Protect orig and donor inodes against a truncate */
 	ret1 = mext_inode_double_lock(orig_inode, donor_inode);
 	if (ret1 < 0)
@@ -1351,7 +1353,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
 		if (ret1 < 0)
 			break;
 		if (*moved_len > len) {
-			ext4_error(orig_inode->i_sb, __func__,
+			ext4_error(orig_inode->i_sb,
 				"We replaced blocks too much! "
 				"sum of replaced: %llu requested: %llu",
 				*moved_len, len);
......
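A pattern that recurs throughout these hunks (and in the namei.c, xattr.c and jbd2 hunks that follow) is dropping the explicit __func__ argument from ext4_error() and ext4_warning(). A plausible way to keep the calling function's name in the log without passing it at every call site is a thin macro wrapper; the sketch below is an assumption of this edit, and the names __ext4_error()/__ext4_warning() are not taken from this diff:

/*
 * Illustrative sketch only: the reporting helpers can pick up the
 * caller's name via a macro, so call sites pass just sb and a format.
 */
extern void __ext4_error(struct super_block *sb, const char *function,
			 const char *fmt, ...);
extern void __ext4_warning(struct super_block *sb, const char *function,
			   const char *fmt, ...);

#define ext4_error(sb, fmt, ...)					\
	__ext4_error((sb), __func__, (fmt), ##__VA_ARGS__)
#define ext4_warning(sb, fmt, ...)					\
	__ext4_warning((sb), __func__, (fmt), ##__VA_ARGS__)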
...@@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -383,8 +383,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (root->info.hash_version != DX_HASH_TEA && if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) { root->info.hash_version != DX_HASH_LEGACY) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
"Unrecognised inode hash code %d",
root->info.hash_version); root->info.hash_version);
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
...@@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -399,8 +398,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
hash = hinfo->hash; hash = hinfo->hash;
if (root->info.unused_flags & 1) { if (root->info.unused_flags & 1) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
"Unimplemented inode hash flags: %#06x",
root->info.unused_flags); root->info.unused_flags);
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
...@@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -408,8 +406,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
} }
if ((indirect = root->info.indirect_levels) > 1) { if ((indirect = root->info.indirect_levels) > 1) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
"Unimplemented inode hash depth: %#06x",
root->info.indirect_levels); root->info.indirect_levels);
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
...@@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -421,8 +418,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (dx_get_limit(entries) != dx_root_limit(dir, if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) { root->info.info_length)) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb, "dx entry: limit != root limit");
"dx entry: limit != root limit");
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
goto fail; goto fail;
...@@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -433,7 +429,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
{ {
count = dx_get_count(entries); count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) { if (!count || count > dx_get_limit(entries)) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb,
"dx entry: no count or count > limit"); "dx entry: no count or count > limit");
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
...@@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -478,7 +474,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
goto fail2; goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries; at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) { if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb,
"dx entry: limit != node limit"); "dx entry: limit != node limit");
brelse(bh); brelse(bh);
*err = ERR_BAD_DX_DIR; *err = ERR_BAD_DX_DIR;
...@@ -494,7 +490,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir, ...@@ -494,7 +490,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
} }
fail: fail:
if (*err == ERR_BAD_DX_DIR) if (*err == ERR_BAD_DX_DIR)
ext4_warning(dir->i_sb, __func__, ext4_warning(dir->i_sb,
"Corrupt dir inode %ld, running e2fsck is " "Corrupt dir inode %ld, running e2fsck is "
"recommended.", dir->i_ino); "recommended.", dir->i_ino);
return NULL; return NULL;
...@@ -947,9 +943,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, ...@@ -947,9 +943,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
wait_on_buffer(bh); wait_on_buffer(bh);
if (!buffer_uptodate(bh)) { if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */ /* read error, skip block & hope for the best */
ext4_error(sb, __func__, "reading directory #%lu " ext4_error(sb, "reading directory #%lu offset %lu",
"offset %lu", dir->i_ino, dir->i_ino, (unsigned long)block);
(unsigned long)block);
brelse(bh); brelse(bh);
goto next; goto next;
} }
...@@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q ...@@ -1041,7 +1036,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
retval = ext4_htree_next_block(dir, hash, frame, retval = ext4_htree_next_block(dir, hash, frame,
frames, NULL); frames, NULL);
if (retval < 0) { if (retval < 0) {
ext4_warning(sb, __func__, ext4_warning(sb,
"error reading index page in directory #%lu", "error reading index page in directory #%lu",
dir->i_ino); dir->i_ino);
*err = retval; *err = retval;
...@@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru ...@@ -1071,14 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
__u32 ino = le32_to_cpu(de->inode); __u32 ino = le32_to_cpu(de->inode);
brelse(bh); brelse(bh);
if (!ext4_valid_inum(dir->i_sb, ino)) { if (!ext4_valid_inum(dir->i_sb, ino)) {
ext4_error(dir->i_sb, "ext4_lookup", ext4_error(dir->i_sb, "bad inode number: %u", ino);
"bad inode number: %u", ino);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
inode = ext4_iget(dir->i_sb, ino); inode = ext4_iget(dir->i_sb, ino);
if (unlikely(IS_ERR(inode))) { if (unlikely(IS_ERR(inode))) {
if (PTR_ERR(inode) == -ESTALE) { if (PTR_ERR(inode) == -ESTALE) {
ext4_error(dir->i_sb, __func__, ext4_error(dir->i_sb,
"deleted inode referenced: %u", "deleted inode referenced: %u",
ino); ino);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
...@@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child) ...@@ -1110,7 +1104,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
brelse(bh); brelse(bh);
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
ext4_error(child->d_inode->i_sb, "ext4_get_parent", ext4_error(child->d_inode->i_sb,
"bad inode number: %u", ino); "bad inode number: %u", ino);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
...@@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, ...@@ -1410,7 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
de = (struct ext4_dir_entry_2 *)((char *)fde + de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len, blocksize)); ext4_rec_len_from_disk(fde->rec_len, blocksize));
if ((char *) de >= (((char *) root) + blocksize)) { if ((char *) de >= (((char *) root) + blocksize)) {
ext4_error(dir->i_sb, __func__, ext4_error(dir->i_sb,
"invalid rec_len for '..' in inode %lu", "invalid rec_len for '..' in inode %lu",
dir->i_ino); dir->i_ino);
brelse(bh); brelse(bh);
...@@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, ...@@ -1575,8 +1569,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (levels && (dx_get_count(frames->entries) == if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) { dx_get_limit(frames->entries))) {
ext4_warning(sb, __func__, ext4_warning(sb, "Directory index full!");
"Directory index full!");
err = -ENOSPC; err = -ENOSPC;
goto cleanup; goto cleanup;
} }
...@@ -1916,11 +1909,11 @@ static int empty_dir(struct inode *inode) ...@@ -1916,11 +1909,11 @@ static int empty_dir(struct inode *inode)
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) { !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err) if (err)
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb,
"error %d reading directory #%lu offset 0", "error %d reading directory #%lu offset 0",
err, inode->i_ino); err, inode->i_ino);
else else
ext4_warning(inode->i_sb, __func__, ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no data block", "bad directory (dir #%lu) - no data block",
inode->i_ino); inode->i_ino);
return 1; return 1;
...@@ -1931,7 +1924,7 @@ static int empty_dir(struct inode *inode) ...@@ -1931,7 +1924,7 @@ static int empty_dir(struct inode *inode)
!le32_to_cpu(de1->inode) || !le32_to_cpu(de1->inode) ||
strcmp(".", de->name) || strcmp(".", de->name) ||
strcmp("..", de1->name)) { strcmp("..", de1->name)) {
ext4_warning(inode->i_sb, "empty_dir", ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no `.' or `..'", "bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino); inode->i_ino);
brelse(bh); brelse(bh);
...@@ -1949,7 +1942,7 @@ static int empty_dir(struct inode *inode) ...@@ -1949,7 +1942,7 @@ static int empty_dir(struct inode *inode)
offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) { if (!bh) {
if (err) if (err)
ext4_error(sb, __func__, ext4_error(sb,
"error %d reading directory" "error %d reading directory"
" #%lu offset %u", " #%lu offset %u",
err, inode->i_ino, offset); err, inode->i_ino, offset);
...@@ -2020,11 +2013,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) ...@@ -2020,11 +2013,18 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
err = ext4_reserve_inode_write(handle, inode, &iloc); err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err) if (err)
goto out_unlock; goto out_unlock;
/*
* Due to previous errors the inode may already be part of the on-disk
* orphan list. If so, skip the on-disk list modification.
*/
if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
(le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
goto mem_insert;
/* Insert this inode at the head of the on-disk orphan list... */ /* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh); err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc); rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err) if (!err)
err = rc; err = rc;
...@@ -2037,6 +2037,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) ...@@ -2037,6 +2037,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
* *
* This is safe: on error we're going to ignore the orphan list * This is safe: on error we're going to ignore the orphan list
* anyway on the next recovery. */ * anyway on the next recovery. */
mem_insert:
if (!err) if (!err)
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
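/*
 * Sketch (hypothetical helper, not part of the patch): the on-disk orphan
 * list is a chain of inode numbers threaded through each orphan's i_dtime
 * field, which NEXT_ORPHAN() reads.  A nonzero value no larger than
 * s_inodes_count therefore means this inode is already linked on disk,
 * which is exactly the test used above to jump straight to mem_insert.
 */
static inline int ext4_orphan_already_on_disk(struct super_block *sb,
					      struct inode *inode)
{
	__u32 next = NEXT_ORPHAN(inode);

	return next && next <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
}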
...@@ -2096,7 +2097,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) ...@@ -2096,7 +2097,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (err) if (err)
goto out_brelse; goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh); err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
} else { } else {
struct ext4_iloc iloc2; struct ext4_iloc iloc2;
struct inode *i_prev = struct inode *i_prev =
...@@ -2163,7 +2164,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) ...@@ -2163,7 +2164,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
if (retval) if (retval)
goto end_rmdir; goto end_rmdir;
if (!EXT4_DIR_LINK_EMPTY(inode)) if (!EXT4_DIR_LINK_EMPTY(inode))
ext4_warning(inode->i_sb, "ext4_rmdir", ext4_warning(inode->i_sb,
"empty directory has too many links (%d)", "empty directory has too many links (%d)",
inode->i_nlink); inode->i_nlink);
inode->i_version++; inode->i_version++;
...@@ -2215,7 +2216,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) ...@@ -2215,7 +2216,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
goto end_unlink; goto end_unlink;
if (!inode->i_nlink) { if (!inode->i_nlink) {
ext4_warning(inode->i_sb, "ext4_unlink", ext4_warning(inode->i_sb,
"Deleting nonexistent file (%lu), %d", "Deleting nonexistent file (%lu), %d",
inode->i_ino, inode->i_nlink); inode->i_ino, inode->i_nlink);
inode->i_nlink = 1; inode->i_nlink = 1;
...@@ -2462,7 +2463,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ...@@ -2462,7 +2463,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
} }
} }
if (retval) { if (retval) {
ext4_warning(old_dir->i_sb, "ext4_rename", ext4_warning(old_dir->i_sb,
"Deleting old file (%lu), %d, error=%d", "Deleting old file (%lu), %d, error=%d",
old_dir->i_ino, old_dir->i_nlink, retval); old_dir->i_ino, old_dir->i_nlink, retval);
} }
......
...@@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, ...@@ -227,7 +227,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_bdebug(bh, "b_count=%d, refcount=%d", ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) { if (ext4_xattr_check_block(bh)) {
bad_block: ext4_error(inode->i_sb, __func__, bad_block:
ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino, "inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl); EXT4_I(inode)->i_file_acl);
error = -EIO; error = -EIO;
...@@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, ...@@ -267,7 +268,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
void *end; void *end;
int error; int error;
if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return -ENODATA; return -ENODATA;
error = ext4_get_inode_loc(inode, &iloc); error = ext4_get_inode_loc(inode, &iloc);
if (error) if (error)
...@@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) ...@@ -371,7 +372,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ea_bdebug(bh, "b_count=%d, refcount=%d", ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) { if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino, "inode %lu: bad block %llu", inode->i_ino,
EXT4_I(inode)->i_file_acl); EXT4_I(inode)->i_file_acl);
error = -EIO; error = -EIO;
...@@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) ...@@ -396,7 +397,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
void *end; void *end;
int error; int error;
if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)) if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
return 0; return 0;
error = ext4_get_inode_loc(inode, &iloc); error = ext4_get_inode_loc(inode, &iloc);
if (error) if (error)
...@@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, ...@@ -665,9 +666,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
atomic_read(&(bs->bh->b_count)), atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount)); le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext4_xattr_check_block(bs->bh)) { if (ext4_xattr_check_block(bs->bh)) {
ext4_error(sb, __func__, ext4_error(sb, "inode %lu: bad block %llu",
"inode %lu: bad block %llu", inode->i_ino, inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_I(inode)->i_file_acl);
error = -EIO; error = -EIO;
goto cleanup; goto cleanup;
} }
...@@ -880,9 +880,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, ...@@ -880,9 +880,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
goto cleanup; goto cleanup;
bad_block: bad_block:
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb, "inode %lu: bad block %llu",
"inode %lu: bad block %llu", inode->i_ino, inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_I(inode)->i_file_acl);
goto cleanup; goto cleanup;
#undef header #undef header
...@@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, ...@@ -908,7 +907,7 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
is->s.base = is->s.first = IFIRST(header); is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first; is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) { if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
error = ext4_xattr_check_names(IFIRST(header), is->s.end); error = ext4_xattr_check_names(IFIRST(header), is->s.end);
if (error) if (error)
return error; return error;
...@@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, ...@@ -940,10 +939,10 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
header = IHDR(inode, ext4_raw_inode(&is->iloc)); header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) { if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
EXT4_I(inode)->i_state |= EXT4_STATE_XATTR; ext4_set_inode_state(inode, EXT4_STATE_XATTR);
} else { } else {
header->h_magic = cpu_to_le32(0); header->h_magic = cpu_to_le32(0);
EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR; ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
} }
return 0; return 0;
} }
...@@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, ...@@ -986,8 +985,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (strlen(name) > 255) if (strlen(name) > 255)
return -ERANGE; return -ERANGE;
down_write(&EXT4_I(inode)->xattr_sem); down_write(&EXT4_I(inode)->xattr_sem);
no_expand = EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND; no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_get_inode_loc(inode, &is.iloc); error = ext4_get_inode_loc(inode, &is.iloc);
if (error) if (error)
...@@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, ...@@ -997,10 +996,10 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (error) if (error)
goto cleanup; goto cleanup;
if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW; ext4_clear_inode_state(inode, EXT4_STATE_NEW);
} }
error = ext4_xattr_ibody_find(inode, &i, &is); error = ext4_xattr_ibody_find(inode, &i, &is);
...@@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, ...@@ -1052,7 +1051,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
ext4_xattr_update_super_block(handle, inode->i_sb); ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode); inode->i_ctime = ext4_current_time(inode);
if (!value) if (!value)
EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/* /*
* The bh is consumed by ext4_mark_iloc_dirty, even with * The bh is consumed by ext4_mark_iloc_dirty, even with
...@@ -1067,7 +1066,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, ...@@ -1067,7 +1066,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
brelse(is.iloc.bh); brelse(is.iloc.bh);
brelse(bs.bh); brelse(bs.bh);
if (no_expand == 0) if (no_expand == 0)
EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND; ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
up_write(&EXT4_I(inode)->xattr_sem); up_write(&EXT4_I(inode)->xattr_sem);
return error; return error;
} }
...@@ -1195,9 +1194,8 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, ...@@ -1195,9 +1194,8 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
if (!bh) if (!bh)
goto cleanup; goto cleanup;
if (ext4_xattr_check_block(bh)) { if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb, "inode %lu: bad block %llu",
"inode %lu: bad block %llu", inode->i_ino, inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_I(inode)->i_file_acl);
error = -EIO; error = -EIO;
goto cleanup; goto cleanup;
} }
...@@ -1302,6 +1300,8 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, ...@@ -1302,6 +1300,8 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
/* Remove the chosen entry from the inode */ /* Remove the chosen entry from the inode */
error = ext4_xattr_ibody_set(handle, inode, &i, is); error = ext4_xattr_ibody_set(handle, inode, &i, is);
if (error)
goto cleanup;
entry = IFIRST(header); entry = IFIRST(header);
if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
...@@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) ...@@ -1372,16 +1372,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
goto cleanup; goto cleanup;
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
if (!bh) { if (!bh) {
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb, "inode %lu: block %llu read error",
"inode %lu: block %llu read error", inode->i_ino, inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_I(inode)->i_file_acl);
goto cleanup; goto cleanup;
} }
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) { BHDR(bh)->h_blocks != cpu_to_le32(1)) {
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb, "inode %lu: bad block %llu",
"inode %lu: bad block %llu", inode->i_ino, inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_I(inode)->i_file_acl);
goto cleanup; goto cleanup;
} }
ext4_xattr_release_block(handle, inode, bh); ext4_xattr_release_block(handle, inode, bh);
...@@ -1506,7 +1504,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header, ...@@ -1506,7 +1504,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
} }
bh = sb_bread(inode->i_sb, ce->e_block); bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) { if (!bh) {
ext4_error(inode->i_sb, __func__, ext4_error(inode->i_sb,
"inode %lu: block %lu read error", "inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block); inode->i_ino, (unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >= } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
......
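The xattr hunks above replace open-coded tests and updates of EXT4_I(inode)->i_state with ext4_test_inode_state(), ext4_set_inode_state() and ext4_clear_inode_state(). A minimal sketch of what such helpers would look like, assuming the dynamic state bits live in an unsigned long and use the standard bitops (the field name i_state_flags is an assumption of this sketch, not shown in the diff):

static inline int ext4_test_inode_state(struct inode *inode, int bit)
{
	return test_bit(bit, &EXT4_I(inode)->i_state_flags);
}

static inline void ext4_set_inode_state(struct inode *inode, int bit)
{
	set_bit(bit, &EXT4_I(inode)->i_state_flags);
}

static inline void ext4_clear_inode_state(struct inode *inode, int bit)
{
	clear_bit(bit, &EXT4_I(inode)->i_state_flags);
}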
...@@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal) ...@@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
if (blocknr < journal->j_tail) if (blocknr < journal->j_tail)
freed = freed + journal->j_last - journal->j_first; freed = freed + journal->j_last - journal->j_first;
trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);
jbd_debug(1, jbd_debug(1,
"Cleaning journal tail from %d to %d (offset %lu), " "Cleaning journal tail from %d to %d (offset %lu), "
"freeing %lu\n", "freeing %lu\n",
......
...@@ -883,8 +883,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) ...@@ -883,8 +883,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
bh = jh2bh(jh); bh = jh2bh(jh);
jbd_lock_bh_state(bh); jbd_lock_bh_state(bh);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
jh->b_transaction == journal->j_running_transaction);
/* /*
* If there is undo-protected committed data against * If there is undo-protected committed data against
...@@ -930,12 +929,12 @@ void jbd2_journal_commit_transaction(journal_t *journal) ...@@ -930,12 +929,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/* A buffer which has been freed while still being /* A buffer which has been freed while still being
* journaled by a previous transaction may end up still * journaled by a previous transaction may end up still
* being dirty here, but we want to avoid writing back * being dirty here, but we want to avoid writing back
* that buffer in the future now that the last use has * that buffer in the future after the "add to orphan"
* been committed. That's not only a performance gain, * operation has been committed. That's not only a performance
* it also stops aliasing problems if the buffer is left * gain, it also stops aliasing problems if the buffer is
* behind for writeback and gets reallocated for another * left behind for writeback and gets reallocated for another
* use in a different page. */ * use in a different page. */
if (buffer_freed(bh)) { if (buffer_freed(bh) && !jh->b_next_transaction) {
clear_buffer_freed(bh); clear_buffer_freed(bh);
clear_buffer_jbddirty(bh); clear_buffer_jbddirty(bh);
} }
......
...@@ -39,6 +39,8 @@ ...@@ -39,6 +39,8 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/math64.h> #include <linux/math64.h>
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/log2.h>
#include <linux/vmalloc.h>
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h> #include <trace/events/jbd2.h>
...@@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); ...@@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno); static void __journal_abort_soft (journal_t *journal, int errno);
static int jbd2_journal_create_slab(size_t slab_size);
/* /*
* Helper function used to manage commit timeouts * Helper function used to manage commit timeouts
...@@ -1248,6 +1251,13 @@ int jbd2_journal_load(journal_t *journal) ...@@ -1248,6 +1251,13 @@ int jbd2_journal_load(journal_t *journal)
} }
} }
/*
* Create a slab for this blocksize
*/
err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize));
if (err)
return err;
/* Let the recovery code check whether it needs to recover any /* Let the recovery code check whether it needs to recover any
* data from the journal. */ * data from the journal. */
if (jbd2_journal_recover(journal)) if (jbd2_journal_recover(journal))
...@@ -1806,6 +1816,127 @@ size_t journal_tag_bytes(journal_t *journal) ...@@ -1806,6 +1816,127 @@ size_t journal_tag_bytes(journal_t *journal)
return JBD2_TAG_SIZE32; return JBD2_TAG_SIZE32;
} }
/*
* JBD memory management
*
* These functions are used to allocate block-sized chunks of memory
* used for making copies of buffer_head data. Very often it will be
* page-sized chunks of data, but sometimes it will be in
* sub-page-size chunks. (For example, 16k pages on Power systems
* with a 4k block file system.) For blocks smaller than a page, we
* use a SLAB allocator. There are slab caches for each block size,
* which are allocated at mount time, if necessary, and we only free
* (all of) the slab caches when/if the jbd2 module is unloaded. For
* this reason we don't need a mutex to protect access to
* jbd2_slab[] when allocating or releasing memory; locking is only
* needed in jbd2_journal_create_slab().
*/
#define JBD2_MAX_SLABS 8
static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
static DECLARE_MUTEX(jbd2_slab_create_sem);
static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
"jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
};
static void jbd2_journal_destroy_slabs(void)
{
int i;
for (i = 0; i < JBD2_MAX_SLABS; i++) {
if (jbd2_slab[i])
kmem_cache_destroy(jbd2_slab[i]);
jbd2_slab[i] = NULL;
}
}
static int jbd2_journal_create_slab(size_t size)
{
int i = order_base_2(size) - 10;
size_t slab_size;
if (size == PAGE_SIZE)
return 0;
if (i >= JBD2_MAX_SLABS)
return -EINVAL;
if (unlikely(i < 0))
i = 0;
down(&jbd2_slab_create_sem);
if (jbd2_slab[i]) {
up(&jbd2_slab_create_sem);
return 0; /* Already created */
}
slab_size = 1 << (i+10);
jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
slab_size, 0, NULL);
up(&jbd2_slab_create_sem);
if (!jbd2_slab[i]) {
printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
return -ENOMEM;
}
return 0;
}
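/*
 * Worked example of the size-to-cache mapping implemented above
 * (derived from the code, shown here for clarity):
 *   size = 1024  -> order_base_2(1024)  = 10 -> i = 0 -> "jbd2_1k"
 *   size = 2048  -> order_base_2(2048)  = 11 -> i = 1 -> "jbd2_2k"
 *   size = 65536 -> order_base_2(65536) = 16 -> i = 6 -> "jbd2_64k"
 * A size equal to PAGE_SIZE returns early and never creates a slab, and
 * anything above 128k (i >= JBD2_MAX_SLABS) is rejected with -EINVAL.
 */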
static struct kmem_cache *get_slab(size_t size)
{
int i = order_base_2(size) - 10;
BUG_ON(i >= JBD2_MAX_SLABS);
if (unlikely(i < 0))
i = 0;
BUG_ON(jbd2_slab[i] == 0);
return jbd2_slab[i];
}
void *jbd2_alloc(size_t size, gfp_t flags)
{
void *ptr;
BUG_ON(size & (size-1)); /* Must be a power of 2 */
flags |= __GFP_REPEAT;
if (size == PAGE_SIZE)
ptr = (void *)__get_free_pages(flags, 0);
else if (size > PAGE_SIZE) {
int order = get_order(size);
if (order < 3)
ptr = (void *)__get_free_pages(flags, order);
else
ptr = vmalloc(size);
} else
ptr = kmem_cache_alloc(get_slab(size), flags);
/* Check alignment; SLUB has gotten this wrong in the past,
* and this can lead to user data corruption! */
BUG_ON(((unsigned long) ptr) & (size-1));
return ptr;
}
void jbd2_free(void *ptr, size_t size)
{
if (size == PAGE_SIZE) {
free_pages((unsigned long)ptr, 0);
return;
}
if (size > PAGE_SIZE) {
int order = get_order(size);
if (order < 3)
free_pages((unsigned long)ptr, order);
else
vfree(ptr);
return;
}
kmem_cache_free(get_slab(size), ptr);
};
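/*
 * Minimal usage sketch (an assumption of this edit, not part of the
 * patch): making a block-sized private copy of a buffer's data with the
 * allocators above, which is the kind of sub-page copy these slab
 * caches exist to serve.  Error handling is intentionally elided.
 */
static void *jbd2_copy_buffer_data(struct buffer_head *bh, gfp_t gfp)
{
	void *tmp = jbd2_alloc(bh->b_size, gfp);

	if (tmp)
		memcpy(tmp, bh->b_data, bh->b_size);
	return tmp;	/* caller releases with jbd2_free(tmp, bh->b_size) */
}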
/* /*
* Journal_head storage management * Journal_head storage management
*/ */
...@@ -2204,6 +2335,7 @@ static void jbd2_journal_destroy_caches(void) ...@@ -2204,6 +2335,7 @@ static void jbd2_journal_destroy_caches(void)
jbd2_journal_destroy_revoke_caches(); jbd2_journal_destroy_revoke_caches();
jbd2_journal_destroy_jbd2_journal_head_cache(); jbd2_journal_destroy_jbd2_journal_head_cache();
jbd2_journal_destroy_handle_cache(); jbd2_journal_destroy_handle_cache();
jbd2_journal_destroy_slabs();
} }
static int __init journal_init(void) static int __init journal_init(void)
......
...@@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) ...@@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
if (!jh) if (!jh)
goto zap_buffer_no_jh; goto zap_buffer_no_jh;
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
* is committed. Otherwise if the transaction changing the
* buffer would be cleaned from the journal before T is
* committed, a crash could cause the correct contents of
* the buffer to be lost. On the other hand we have to
* clear the buffer's dirty bit at the latest when the
* transaction marking the buffer as freed in the filesystem
* structures is committed because from that moment on the
* buffer can be reallocated and used by a different page.
* Since the block hasn't been freed yet but the inode has
* already been added to the orphan list, it is safe for us to add
* the buffer to the BJ_Forget list of the newest transaction.
*/
transaction = jh->b_transaction; transaction = jh->b_transaction;
if (transaction == NULL) { if (transaction == NULL) {
/* First case: not on any transaction. If it /* First case: not on any transaction. If it
...@@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) ...@@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
} else if (transaction == journal->j_committing_transaction) { } else if (transaction == journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "on committing transaction"); JBUFFER_TRACE(jh, "on committing transaction");
/* /*
* If it is committing, we simply cannot touch it. We * The buffer is committing, we simply cannot touch
* can remove it's next_transaction pointer from the * it. So we just set b_next_transaction to the
* running transaction if that is set, but nothing * running transaction (if there is one) and mark
* else. */ * the buffer as freed so that the commit code knows it should
* clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh); set_buffer_freed(bh);
if (jh->b_next_transaction) { if (journal->j_running_transaction && buffer_jbddirty(bh))
J_ASSERT(jh->b_next_transaction == jh->b_next_transaction = journal->j_running_transaction;
journal->j_running_transaction);
jh->b_next_transaction = NULL;
}
jbd2_journal_put_journal_head(jh); jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh); jbd_unlock_bh_state(bh);
...@@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh, ...@@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
*/ */
void __jbd2_journal_refile_buffer(struct journal_head *jh) void __jbd2_journal_refile_buffer(struct journal_head *jh)
{ {
int was_dirty; int was_dirty, jlist;
struct buffer_head *bh = jh2bh(jh); struct buffer_head *bh = jh2bh(jh);
J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
...@@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) ...@@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
__jbd2_journal_temp_unlink_buffer(jh); __jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction; jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL; jh->b_next_transaction = NULL;
__jbd2_journal_file_buffer(jh, jh->b_transaction, if (buffer_freed(bh))
jh->b_modified ? BJ_Metadata : BJ_Reserved); jlist = BJ_Forget;
else if (jh->b_modified)
jlist = BJ_Metadata;
else
jlist = BJ_Reserved;
__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty) if (was_dirty)
......
...@@ -69,15 +69,8 @@ extern u8 jbd2_journal_enable_debug; ...@@ -69,15 +69,8 @@ extern u8 jbd2_journal_enable_debug;
#define jbd_debug(f, a...) /**/ #define jbd_debug(f, a...) /**/
#endif #endif
static inline void *jbd2_alloc(size_t size, gfp_t flags) extern void *jbd2_alloc(size_t size, gfp_t flags);
{ extern void jbd2_free(void *ptr, size_t size);
return (void *)__get_free_pages(flags, get_order(size));
}
static inline void jbd2_free(void *ptr, size_t size)
{
free_pages((unsigned long)ptr, get_order(size));
};
#define JBD2_MIN_JOURNAL_BLOCKS 1024 #define JBD2_MIN_JOURNAL_BLOCKS 1024
......
...@@ -874,6 +874,107 @@ TRACE_EVENT(ext4_forget, ...@@ -874,6 +874,107 @@ TRACE_EVENT(ext4_forget,
__entry->mode, __entry->is_metadata, __entry->block) __entry->mode, __entry->is_metadata, __entry->block)
); );
TRACE_EVENT(ext4_da_update_reserve_space,
TP_PROTO(struct inode *inode, int used_blocks),
TP_ARGS(inode, used_blocks),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
__field( int, used_blocks )
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
__field( int, allocated_meta_blocks )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
__entry->used_blocks = used_blocks;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
),
TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->mode, (unsigned long long) __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
TRACE_EVENT(ext4_da_reserve_space,
TP_PROTO(struct inode *inode, int md_needed),
TP_ARGS(inode, md_needed),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
__field( int, md_needed )
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
__entry->md_needed = md_needed;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
),
TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->mode, (unsigned long long) __entry->i_blocks,
__entry->md_needed, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks)
);
TRACE_EVENT(ext4_da_release_space,
TP_PROTO(struct inode *inode, int freed_blocks),
TP_ARGS(inode, freed_blocks),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( __u64, i_blocks )
__field( int, freed_blocks )
__field( int, reserved_data_blocks )
__field( int, reserved_meta_blocks )
__field( int, allocated_meta_blocks )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->i_blocks = inode->i_blocks;
__entry->freed_blocks = freed_blocks;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
),
TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->mode, (unsigned long long) __entry->i_blocks,
__entry->freed_blocks, __entry->reserved_data_blocks,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
#endif /* _TRACE_EXT4_H */ #endif /* _TRACE_EXT4_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
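Each TRACE_EVENT(name, ...) above generates a trace_name() call with the TP_PROTO() signature; the filesystem then fires it at the matching point in the delayed-allocation paths. The call sites are not part of this hunk, so the sketch below is only an assumed illustration of how the new ext4_da_reserve_space event would be invoked:

#include <trace/events/ext4.h>

/* Hypothetical call site: report a delayed-allocation reservation. */
static void report_da_reservation(struct inode *inode, int md_needed)
{
	trace_ext4_da_reserve_space(inode, md_needed);
}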
...@@ -199,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats, ...@@ -199,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->forced_to_close, __entry->written, __entry->dropped) __entry->forced_to_close, __entry->written, __entry->dropped)
); );
TRACE_EVENT(jbd2_cleanup_journal_tail,
TP_PROTO(journal_t *journal, tid_t first_tid,
unsigned long block_nr, unsigned long freed),
TP_ARGS(journal, first_tid, block_nr, freed),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( tid_t, tail_sequence )
__field( tid_t, first_tid )
__field(unsigned long, block_nr )
__field(unsigned long, freed )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
__entry->tail_sequence = journal->j_tail_sequence;
__entry->first_tid = first_tid;
__entry->block_nr = block_nr;
__entry->freed = freed;
),
TP_printk("dev %s from %u to %u offset %lu freed %lu",
jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
__entry->first_tid, __entry->block_nr, __entry->freed)
);
#endif /* _TRACE_JBD2_H */ #endif /* _TRACE_JBD2_H */
/* This part must be outside protection */ /* This part must be outside protection */
......